1/* $Id: IEMAll.cpp 92553 2021-11-22 18:31:11Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler was considered; however, this is thought to
36 * conflict with speed, as the disassembler chews on things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
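/*
 * Illustrative examples of the logging convention above (sketches, not
 * statements lifted from this file; GCPtrMem and cbMem are hypothetical
 * variables): a memory write would typically be logged at level 8 and a
 * read at level 9, e.g.
 *      Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));
 *      Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));
 * while decoded mnemonics with EIP go to Log4() and decoding details to Log5().
 */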
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
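/*
 * Illustration of the difference (a sketch, not code from this file): without
 * IEM_WITH_SETJMP a data fetch returns a strict status code that every caller
 * has to check and propagate,
 *
 *      uint32_t u32Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * whereas with IEM_WITH_SETJMP the setjmp/longjmp fetch variants defined further
 * down return the value directly and longjmp out of the instruction on failure,
 * so the status checking disappears from the callers.
 */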
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
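/*
 * Typical usage sketch (illustrative only, not taken from a specific instruction
 * handler; fWeirdCornerCase is a hypothetical condition): bail out of an
 * unimplemented corner case, logging it in debug builds.
 *
 *      if (fWeirdCornerCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("odd operand combination\n"));
 */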
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
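/*
 * Minimal sketch of how the FNIEMOP_* macros fit together (iemOpExample and
 * iemOpExample_rm are hypothetical names, not handlers defined in this file;
 * a real handler would fetch and decode the ModR/M byte instead of hardcoding it):
 */
#if 0
FNIEMOPRM_DEF(iemOpExample_rm)
{
    RT_NOREF(pVCpu, bRm);                       /* a real handler would act on bRm here */
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOpExample)
{
    uint8_t const bRm = 0xc0;                   /* placeholder for the fetched ModR/M byte */
    return FNIEMOP_CALL_1(iemOpExample_rm, bRm);
}
#endif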
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored if not 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
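/*
 * What this switch amounts to when fetching a misaligned 32-bit value from a
 * byte buffer (sketch only, with a hypothetical buffer pointer pb; the real
 * fetch code lives further down in this file):
 *
 *      #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *          u32Value = *(uint32_t const *)pb;                            // x86/AMD64 handle unaligned loads fine
 *      #else
 *          u32Value = RT_MAKE_U32_FROM_U8(pb[0], pb[1], pb[2], pb[3]);  // explicit byte assembly otherwise
 *      #endif
 */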
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for a triple fault.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
459
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept and updates
520 * NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
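/*
 * Usage sketch (assumed; the actual group 1 decoders live in the
 * IEMAllInstructions*.cpp.h includes): the ModR/M reg field selects the
 * implementation, i.e. ADD/OR/ADC/SBB/AND/SUB/XOR/CMP for /0../7:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */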
736
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
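/*
 * Usage note (see e.g. the opcode prefetching code below): informational
 * statuses from PGM reads and writes are folded into the pass-up status while
 * the current instruction is allowed to complete, e.g.
 *      rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */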
1029
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 * @param fDisregardLock Whether to disregard the LOCK prefix.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1210
1211 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1212 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1213 pVCpu->iem.s.enmCpuMode = enmMode;
1214 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffAddrMode = enmMode;
1216 if (enmMode != IEMMODE_64BIT)
1217 {
1218 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1219 pVCpu->iem.s.enmEffOpSize = enmMode;
1220 }
1221 else
1222 {
1223 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1224 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1225 }
1226 pVCpu->iem.s.fPrefixes = 0;
1227 pVCpu->iem.s.uRexReg = 0;
1228 pVCpu->iem.s.uRexB = 0;
1229 pVCpu->iem.s.uRexIndex = 0;
1230 pVCpu->iem.s.idxPrefix = 0;
1231 pVCpu->iem.s.uVex3rdReg = 0;
1232 pVCpu->iem.s.uVexLength = 0;
1233 pVCpu->iem.s.fEvexStuff = 0;
1234 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1235#ifdef IEM_WITH_CODE_TLB
1236 pVCpu->iem.s.pbInstrBuf = NULL;
1237 pVCpu->iem.s.offInstrNextByte = 0;
1238 pVCpu->iem.s.offCurInstrStart = 0;
1239# ifdef VBOX_STRICT
1240 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1241 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1242 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1243# endif
1244#else
1245 pVCpu->iem.s.offOpcode = 0;
1246 pVCpu->iem.s.cbOpcode = 0;
1247#endif
1248 pVCpu->iem.s.offModRm = 0;
1249 pVCpu->iem.s.cActiveMappings = 0;
1250 pVCpu->iem.s.iNextMapping = 0;
1251 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1252 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1253 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1254
1255#ifdef DBGFTRACE_ENABLED
1256 switch (enmMode)
1257 {
1258 case IEMMODE_64BIT:
1259 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1260 break;
1261 case IEMMODE_32BIT:
1262 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1263 break;
1264 case IEMMODE_16BIT:
1265 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1266 break;
1267 }
1268#endif
1269}
1270
1271
1272/**
1273 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1274 *
1275 * This is mostly a copy of iemInitDecoder.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1278 */
1279DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1280{
1281 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1290
1291 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1292 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1293 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1294 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1295 pVCpu->iem.s.enmEffAddrMode = enmMode;
1296 if (enmMode != IEMMODE_64BIT)
1297 {
1298 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1299 pVCpu->iem.s.enmEffOpSize = enmMode;
1300 }
1301 else
1302 {
1303 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1304 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1305 }
1306 pVCpu->iem.s.fPrefixes = 0;
1307 pVCpu->iem.s.uRexReg = 0;
1308 pVCpu->iem.s.uRexB = 0;
1309 pVCpu->iem.s.uRexIndex = 0;
1310 pVCpu->iem.s.idxPrefix = 0;
1311 pVCpu->iem.s.uVex3rdReg = 0;
1312 pVCpu->iem.s.uVexLength = 0;
1313 pVCpu->iem.s.fEvexStuff = 0;
1314 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1315#ifdef IEM_WITH_CODE_TLB
1316 if (pVCpu->iem.s.pbInstrBuf)
1317 {
1318 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1319 - pVCpu->iem.s.uInstrBufPc;
1320 if (off < pVCpu->iem.s.cbInstrBufTotal)
1321 {
1322 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1323 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1324 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1325 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1326 else
1327 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1328 }
1329 else
1330 {
1331 pVCpu->iem.s.pbInstrBuf = NULL;
1332 pVCpu->iem.s.offInstrNextByte = 0;
1333 pVCpu->iem.s.offCurInstrStart = 0;
1334 pVCpu->iem.s.cbInstrBuf = 0;
1335 pVCpu->iem.s.cbInstrBufTotal = 0;
1336 }
1337 }
1338 else
1339 {
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345#else
1346 pVCpu->iem.s.cbOpcode = 0;
1347 pVCpu->iem.s.offOpcode = 0;
1348#endif
1349 pVCpu->iem.s.offModRm = 0;
1350 Assert(pVCpu->iem.s.cActiveMappings == 0);
1351 pVCpu->iem.s.iNextMapping = 0;
1352 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1353 Assert(pVCpu->iem.s.fBypassHandlers == false);
1354
1355#ifdef DBGFTRACE_ENABLED
1356 switch (enmMode)
1357 {
1358 case IEMMODE_64BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1360 break;
1361 case IEMMODE_32BIT:
1362 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1363 break;
1364 case IEMMODE_16BIT:
1365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1366 break;
1367 }
1368#endif
1369}
1370
1371
1372
1373/**
1374 * Prefetches opcodes the first time, when starting execution.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure of the
1378 * calling thread.
1379 * @param fBypassHandlers Whether to bypass access handlers.
1380 * @param fDisregardLock Whether to disregard LOCK prefixes.
1381 *
1382 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1383 * store them as such.
1384 */
1385IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1386{
1387 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1388
1389#ifdef IEM_WITH_CODE_TLB
1390 /** @todo Do ITLB lookup here. */
1391
1392#else /* !IEM_WITH_CODE_TLB */
1393
1394 /*
1395 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1396 *
1397 * First translate CS:rIP to a physical address.
1398 */
1399 uint32_t cbToTryRead;
1400 RTGCPTR GCPtrPC;
1401 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1402 {
1403 cbToTryRead = PAGE_SIZE;
1404 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1405 if (IEM_IS_CANONICAL(GCPtrPC))
1406 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1407 else
1408 return iemRaiseGeneralProtectionFault0(pVCpu);
1409 }
1410 else
1411 {
1412 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1413 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1414 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1415 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1416 else
1417 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1418 if (cbToTryRead) { /* likely */ }
1419 else /* overflowed */
1420 {
1421 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1422 cbToTryRead = UINT32_MAX;
1423 }
1424 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1425 Assert(GCPtrPC <= UINT32_MAX);
1426 }
1427
1428 PGMPTWALK Walk;
1429 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1430 if (RT_SUCCESS(rc))
1431 Assert(Walk.fSucceeded); /* probable. */
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1436 }
1437 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1444 else
1445 {
1446 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1448 }
1449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK);
1450 /** @todo Check reserved bits and such stuff. PGM is better at doing
1451 * that, so do it when implementing the guest virtual address
1452 * TLB... */
1453
1454 /*
1455 * Read the bytes at this address.
1456 */
1457 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1458 if (cbToTryRead > cbLeftOnPage)
1459 cbToTryRead = cbLeftOnPage;
1460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1462
1463 if (!pVCpu->iem.s.fBypassHandlers)
1464 {
1465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1467 { /* likely */ }
1468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1469 {
1470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1473 }
1474 else
1475 {
1476 Log((RT_SUCCESS(rcStrict)
1477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1480 return rcStrict;
1481 }
1482 }
1483 else
1484 {
1485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1486 if (RT_SUCCESS(rc))
1487 { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1491 GCPtrPC, GCPhys, cbToTryRead, rc));
1492 return rc;
1493 }
1494 }
1495 pVCpu->iem.s.cbOpcode = cbToTryRead;
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
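/*
 * Illustrative note on the prefetch arithmetic above (a sketch for
 * orientation, not used by the build): in 64-bit mode the initial read is
 * clamped so it never crosses a page boundary nor overflows the opcode
 * buffer, i.e. effectively
 *
 *      cbToTryRead = RT_MIN(PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK),
 *                           sizeof(pVCpu->iem.s.abOpcode));
 *
 * (legacy modes additionally clamp against the CS limit).  So with
 * GCPtrPC ending in 0xffa only 6 bytes are prefetched here, and the rest of
 * a long instruction is pulled in later by iemOpcodeFetchMoreBytes or the
 * code TLB path.
 */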
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
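/*
 * Illustrative sketch of the lazy invalidation scheme above (hypothetical
 * pTlb/GCPtrPage names, for orientation only): a lookup only hits when the
 * revision baked into the tag matches the current uTlbRevision, so bumping
 * the revision invalidates every entry without touching the array:
 *
 *      uint64_t const uTag  = (GCPtrPage >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *      PIEMTLBENTRY   pTlbe = &pTlb->aEntries[(uint8_t)uTag];
 *      bool const     fHit  = pTlbe->uTag == uTag;
 *
 * Only when the revision counter wraps back to zero do the tags need to be
 * scrubbed, which is what the unlikely branch above does.
 */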
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param GCPtr The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
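/*
 * Worked example for the tag/index scheme used just above (illustrative
 * numbers only): for GCPtr = 0x00007f0012345678 the page number is
 * GCPtr >> X86_PAGE_SHIFT = 0x7f0012345, the entry index is its low byte
 * (0x45), and the tag compared against is that page number OR'ed with the
 * current TLB revision.  Pages whose page numbers differ only above bit 7
 * therefore share an entry and evict each other.
 */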
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1646 * failure and jumps.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684 pVCpu->iem.s.offInstrNextByte = offBuf;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 RTGCPTR GCPtrFirst;
1696 uint32_t cbMaxRead;
1697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1698 {
1699 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1700 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1701 { /* likely */ }
1702 else
1703 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1704 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 }
1706 else
1707 {
1708 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1709 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1710 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1711 { /* likely */ }
1712 else
1713 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1714 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1715 if (cbMaxRead != 0)
1716 { /* likely */ }
1717 else
1718 {
1719 /* Overflowed because address is 0 and limit is max. */
1720 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1721 cbMaxRead = X86_PAGE_SIZE;
1722 }
1723 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1724 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 if (cbMaxRead2 < cbMaxRead)
1726 cbMaxRead = cbMaxRead2;
1727 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1728 }
1729
1730 /*
1731 * Get the TLB entry for this piece of code.
1732 */
1733 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1734 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1735 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1736 if (pTlbe->uTag == uTag)
1737 {
1738 /* likely when executing lots of code, otherwise unlikely */
1739# ifdef VBOX_WITH_STATISTICS
1740 pVCpu->iem.s.CodeTlb.cTlbHits++;
1741# endif
1742 }
1743 else
1744 {
1745 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1746 PGMPTWALK Walk;
1747 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
1748 if (RT_FAILURE(rc))
1749 {
1750 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1751 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1752 }
1753
1754 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1755 Assert(Walk.fSucceeded);
1756 pTlbe->uTag = uTag;
1757 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
1758 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
1759 pTlbe->GCPhys = Walk.GCPhys;
1760 pTlbe->pbMappingR3 = NULL;
1761 }
1762
1763 /*
1764 * Check TLB page table level access flags.
1765 */
1766 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1767 {
1768 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1769 {
1770 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1771 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1772 }
1773 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1776 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1777 }
1778 }
1779
1780 /*
1781 * Look up the physical page info if necessary.
1782 */
1783 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1784 { /* not necessary */ }
1785 else
1786 {
1787 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1788 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1789 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1790 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1791 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1792 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1793 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1794 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1795 }
1796
1797# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1798 /*
1799 * Try do a direct read using the pbMappingR3 pointer.
1800 */
1801 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1802 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1803 {
1804 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1805 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1806 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1807 {
1808 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1809 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1810 }
1811 else
1812 {
1813 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1814 Assert(cbInstr < cbMaxRead);
1815 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1816 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1817 }
1818 if (cbDst <= cbMaxRead)
1819 {
1820 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1821 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1822 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1823 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1824 return;
1825 }
1826 pVCpu->iem.s.pbInstrBuf = NULL;
1827
1828 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1829 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1830 }
1831 else
1832# endif
1833#if 0
1834 /*
1835 * If there is no special read handling, we can read a bit more and
1836 * put it in the prefetch buffer.
1837 */
1838 if ( cbDst < cbMaxRead
1839 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1840 {
1841 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1842 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1843 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1844 { /* likely */ }
1845 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1846 {
1847 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1848 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1850 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1851 }
1852 else
1853 {
1854 Log((RT_SUCCESS(rcStrict)
1855 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1856 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1857 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1858 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1859 }
1860 }
1861 /*
1862 * Special read handling, so only read exactly what's needed.
1863 * This is a highly unlikely scenario.
1864 */
1865 else
1866#endif
1867 {
1868 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1869 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1870 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1871 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1872 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1873 { /* likely */ }
1874 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1875 {
1876 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1877 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1878 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1879 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1880 }
1881 else
1882 {
1883 Log((RT_SUCCESS(rcStrict)
1884 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1885 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1886 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1887 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1888 }
1889 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1890 if (cbToRead == cbDst)
1891 return;
1892 }
1893
1894 /*
1895 * More to read, loop.
1896 */
1897 cbDst -= cbMaxRead;
1898 pvDst = (uint8_t *)pvDst + cbMaxRead;
1899 }
1900#else
1901 RT_NOREF(pvDst, cbDst);
1902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1903#endif
1904}
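/*
 * Rough sketch of the instruction buffer bookkeeping handled above
 * (orientation only, cbWanted is a hypothetical name):
 *
 *      pbInstrBuf       - host mapping of the current guest code page
 *      offCurInstrStart - offset of the current instruction in the buffer,
 *                         negative if it started on the previous page
 *      offInstrNextByte - the fetch cursor
 *      cbInstrBuf       - readable bytes for this instruction, capped at
 *                         offCurInstrStart + 15 (max x86 instruction length)
 *      cbInstrBufTotal  - readable bytes on the page for later instructions
 *
 * so the inlined fast path in the fetchers further down boils down to:
 *
 *      if (offInstrNextByte + cbWanted <= cbInstrBuf)
 *          memcpy(pvDst, &pbInstrBuf[offInstrNextByte], cbWanted);
 *      else
 *          iemOpcodeFetchBytesJmp(pVCpu, cbWanted, pvDst);
 */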
1905
1906#else
1907
1908/**
1909 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1910 * exception if it fails.
1911 *
1912 * @returns Strict VBox status code.
1913 * @param pVCpu The cross context virtual CPU structure of the
1914 * calling thread.
1915 * @param cbMin The minimum number of bytes relative to offOpcode
1916 * that must be read.
1917 */
1918IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1919{
1920 /*
1921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1922 *
1923 * First translate CS:rIP to a physical address.
1924 */
1925 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1926 uint32_t cbToTryRead;
1927 RTGCPTR GCPtrNext;
1928 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1929 {
1930 cbToTryRead = PAGE_SIZE;
1931 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1932 if (!IEM_IS_CANONICAL(GCPtrNext))
1933 return iemRaiseGeneralProtectionFault0(pVCpu);
1934 }
1935 else
1936 {
1937 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1938 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1939 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1940 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1941 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1942 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1943 if (!cbToTryRead) /* overflowed */
1944 {
1945 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1946 cbToTryRead = UINT32_MAX;
1947 /** @todo check out wrapping around the code segment. */
1948 }
1949 if (cbToTryRead < cbMin - cbLeft)
1950 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1951 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1952 }
1953
1954 /* Only read up to the end of the page, and make sure we don't read more
1955 than the opcode buffer can hold. */
1956 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1957 if (cbToTryRead > cbLeftOnPage)
1958 cbToTryRead = cbLeftOnPage;
1959 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1960 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1961/** @todo r=bird: Convert assertion into undefined opcode exception? */
1962 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1963
1964 PGMPTWALK Walk;
1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1966 if (RT_FAILURE(rc))
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1970 }
1971 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1977 {
1978 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1979 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1980 }
1981 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK);
1982 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1983 /** @todo Check reserved bits and such stuff. PGM is better at doing
1984 * that, so do it when implementing the guest virtual address
1985 * TLB... */
1986
1987 /*
1988 * Read the bytes at this address.
1989 *
1990 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1991 * and since PATM should only patch the start of an instruction there
1992 * should be no need to check again here.
1993 */
1994 if (!pVCpu->iem.s.fBypassHandlers)
1995 {
1996 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1997 cbToTryRead, PGMACCESSORIGIN_IEM);
1998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1999 { /* likely */ }
2000 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2001 {
2002 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2003 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2004 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2005 }
2006 else
2007 {
2008 Log((RT_SUCCESS(rcStrict)
2009 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2010 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2011 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2012 return rcStrict;
2013 }
2014 }
2015 else
2016 {
2017 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2018 if (RT_SUCCESS(rc))
2019 { /* likely */ }
2020 else
2021 {
2022 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2023 return rc;
2024 }
2025 }
2026 pVCpu->iem.s.cbOpcode += cbToTryRead;
2027 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2028
2029 return VINF_SUCCESS;
2030}
2031
2032#endif /* !IEM_WITH_CODE_TLB */
2033#ifndef IEM_WITH_SETJMP
2034
2035/**
2036 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2037 *
2038 * @returns Strict VBox status code.
2039 * @param pVCpu The cross context virtual CPU structure of the
2040 * calling thread.
2041 * @param pb Where to return the opcode byte.
2042 */
2043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2044{
2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2046 if (rcStrict == VINF_SUCCESS)
2047 {
2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2049 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2050 pVCpu->iem.s.offOpcode = offOpcode + 1;
2051 }
2052 else
2053 *pb = 0;
2054 return rcStrict;
2055}
2056
2057
2058/**
2059 * Fetches the next opcode byte.
2060 *
2061 * @returns Strict VBox status code.
2062 * @param pVCpu The cross context virtual CPU structure of the
2063 * calling thread.
2064 * @param pu8 Where to return the opcode byte.
2065 */
2066DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2067{
2068 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2069 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2070 {
2071 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2072 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2073 return VINF_SUCCESS;
2074 }
2075 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2076}
2077
2078#else /* IEM_WITH_SETJMP */
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2082 *
2083 * @returns The opcode byte.
2084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2085 */
2086DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2087{
2088# ifdef IEM_WITH_CODE_TLB
2089 uint8_t u8;
2090 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2091 return u8;
2092# else
2093 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2094 if (rcStrict == VINF_SUCCESS)
2095 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2096 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2097# endif
2098}
2099
2100
2101/**
2102 * Fetches the next opcode byte, longjmp on error.
2103 *
2104 * @returns The opcode byte.
2105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2106 */
2107DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2108{
2109# ifdef IEM_WITH_CODE_TLB
2110 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2111 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2112 if (RT_LIKELY( pbBuf != NULL
2113 && offBuf < pVCpu->iem.s.cbInstrBuf))
2114 {
2115 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2116 return pbBuf[offBuf];
2117 }
2118# else
2119 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2120 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2121 {
2122 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2123 return pVCpu->iem.s.abOpcode[offOpcode];
2124 }
2125# endif
2126 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2127}
2128
2129#endif /* IEM_WITH_SETJMP */
2130
2131/**
2132 * Fetches the next opcode byte, returns automatically on failure.
2133 *
2134 * @param a_pu8 Where to return the opcode byte.
2135 * @remark Implicitly references pVCpu.
2136 */
2137#ifndef IEM_WITH_SETJMP
2138# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2139 do \
2140 { \
2141 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2142 if (rcStrict2 == VINF_SUCCESS) \
2143 { /* likely */ } \
2144 else \
2145 return rcStrict2; \
2146 } while (0)
2147#else
2148# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2149#endif /* IEM_WITH_SETJMP */
2150
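/*
 * Usage sketch (hypothetical decoder stub, not from this file): the macro
 * hides the two build flavours, so decoder code is written once and either
 * returns a strict status code or longjmps on a failed fetch:
 *
 *      FNIEMOP_DEF(iemOp_ExampleOnly_Ib)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);   // returns / longjmps on failure
 *          ...
 *      }
 */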
2151
2152#ifndef IEM_WITH_SETJMP
2153/**
2154 * Fetches the next signed byte from the opcode stream.
2155 *
2156 * @returns Strict VBox status code.
2157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2158 * @param pi8 Where to return the signed byte.
2159 */
2160DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2161{
2162 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2163}
2164#endif /* !IEM_WITH_SETJMP */
2165
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, returning automatically
2169 * on failure.
2170 *
2171 * @param a_pi8 Where to return the signed byte.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else /* IEM_WITH_SETJMP */
2183# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184
2185#endif /* IEM_WITH_SETJMP */
2186
2187#ifndef IEM_WITH_SETJMP
2188
2189/**
2190 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 * @param pu16 Where to return the opcode word.
2195 */
2196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2197{
2198 uint8_t u8;
2199 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2200 if (rcStrict == VINF_SUCCESS)
2201 *pu16 = (int8_t)u8;
2202 return rcStrict;
2203}
2204
2205
2206/**
2207 * Fetches the next signed byte from the opcode stream, extending it to
2208 * unsigned 16-bit.
2209 *
2210 * @returns Strict VBox status code.
2211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2212 * @param pu16 Where to return the unsigned word.
2213 */
2214DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2215{
2216 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2217 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2218 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2219
2220 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2221 pVCpu->iem.s.offOpcode = offOpcode + 1;
2222 return VINF_SUCCESS;
2223}
2224
2225#endif /* !IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream and sign-extends it to
2229 * a word, returning automatically on failure.
2230 *
2231 * @param a_pu16 Where to return the word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
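/*
 * Worked example for the S8_SX_Uxx fetchers in this area: the byte is read
 * as signed and then widened, so an opcode byte of 0xFE (-2) becomes 0xFFFE
 * as uint16_t, 0xFFFFFFFE as uint32_t and 0xFFFFFFFFFFFFFFFE as uint64_t,
 * exactly what the (int8_t) casts in the inliners produce.
 */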
2245
2246#ifndef IEM_WITH_SETJMP
2247
2248/**
2249 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2250 *
2251 * @returns Strict VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param pu32 Where to return the opcode dword.
2254 */
2255DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2256{
2257 uint8_t u8;
2258 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2259 if (rcStrict == VINF_SUCCESS)
2260 *pu32 = (int8_t)u8;
2261 return rcStrict;
2262}
2263
2264
2265/**
2266 * Fetches the next signed byte from the opcode stream, extending it to
2267 * unsigned 32-bit.
2268 *
2269 * @returns Strict VBox status code.
2270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2271 * @param pu32 Where to return the unsigned dword.
2272 */
2273DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2274{
2275 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2276 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2277 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2278
2279 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2280 pVCpu->iem.s.offOpcode = offOpcode + 1;
2281 return VINF_SUCCESS;
2282}
2283
2284#endif /* !IEM_WITH_SETJMP */
2285
2286/**
2287 * Fetches the next signed byte from the opcode stream and sign-extends it to
2288 * a double word, returning automatically on failure.
2289 *
2290 * @param a_pu32 Where to return the double word.
2291 * @remark Implicitly references pVCpu.
2292 */
2293#ifndef IEM_WITH_SETJMP
2294# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2295 do \
2296 { \
2297 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2298 if (rcStrict2 != VINF_SUCCESS) \
2299 return rcStrict2; \
2300 } while (0)
2301#else
2302# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2303#endif
2304
2305#ifndef IEM_WITH_SETJMP
2306
2307/**
2308 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param pu64 Where to return the opcode qword.
2313 */
2314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2315{
2316 uint8_t u8;
2317 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2318 if (rcStrict == VINF_SUCCESS)
2319 *pu64 = (int8_t)u8;
2320 return rcStrict;
2321}
2322
2323
2324/**
2325 * Fetches the next signed byte from the opcode stream, extending it to
2326 * unsigned 64-bit.
2327 *
2328 * @returns Strict VBox status code.
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param pu64 Where to return the unsigned qword.
2331 */
2332DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2333{
2334 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2335 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2336 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2337
2338 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2339 pVCpu->iem.s.offOpcode = offOpcode + 1;
2340 return VINF_SUCCESS;
2341}
2342
2343#endif /* !IEM_WITH_SETJMP */
2344
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream and sign-extends it to
2348 * a quad word, returning automatically on failure.
2349 *
2350 * @param a_pu64 Where to return the quad word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365
2366#ifndef IEM_WITH_SETJMP
2367/**
2368 * Fetches the next opcode byte, recording the position as the ModR/M offset.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the
2372 * calling thread.
2373 * @param pu8 Where to return the opcode byte.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2376{
2377 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 pVCpu->iem.s.offModRm = offOpcode;
2379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2380 {
2381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2383 return VINF_SUCCESS;
2384 }
2385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2386}
2387#else /* IEM_WITH_SETJMP */
2388/**
2389 * Fetches the next opcode byte, recording the position as the ModR/M offset; longjmp on error.
2390 *
2391 * @returns The opcode byte.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 */
2394DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2395{
2396# ifdef IEM_WITH_CODE_TLB
2397 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2398 pVCpu->iem.s.offModRm = offBuf;
2399 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2400 if (RT_LIKELY( pbBuf != NULL
2401 && offBuf < pVCpu->iem.s.cbInstrBuf))
2402 {
2403 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2404 return pbBuf[offBuf];
2405 }
2406# else
2407 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2408 pVCpu->iem.s.offModRm = offOpcode;
2409 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2410 {
2411 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2412 return pVCpu->iem.s.abOpcode[offOpcode];
2413 }
2414# endif
2415 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2416}
2417#endif /* IEM_WITH_SETJMP */
2418
2419/**
2420 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2421 * on failure.
2422 *
2423 * Will note down the position of the ModR/M byte for VT-x exits.
2424 *
2425 * @param a_pbRm Where to return the RM opcode byte.
2426 * @remark Implicitly references pVCpu.
2427 */
2428#ifndef IEM_WITH_SETJMP
2429# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2430 do \
2431 { \
2432 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2433 if (rcStrict2 == VINF_SUCCESS) \
2434 { /* likely */ } \
2435 else \
2436 return rcStrict2; \
2437 } while (0)
2438#else
2439# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2440#endif /* IEM_WITH_SETJMP */
2441
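/*
 * Sketch of what callers typically do with the byte fetched via
 * IEM_OPCODE_GET_NEXT_RM (illustrative only; the field macros are the
 * standard ModR/M helpers from x86.h):
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);
 *      uint8_t const iMod = (bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK;  // 0..3
 *      uint8_t const iReg = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;  // 0..7
 *      uint8_t const iRm  =  bRm                         & X86_MODRM_RM_MASK;    // 0..7
 *
 * while pVCpu->iem.s.offModRm remembers where the byte sat so VT-x exit
 * handling can locate it again.
 */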
2442
2443#ifndef IEM_WITH_SETJMP
2444
2445/**
2446 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pu16 Where to return the opcode word.
2451 */
2452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2453{
2454 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2455 if (rcStrict == VINF_SUCCESS)
2456 {
2457 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2460# else
2461 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2462# endif
2463 pVCpu->iem.s.offOpcode = offOpcode + 2;
2464 }
2465 else
2466 *pu16 = 0;
2467 return rcStrict;
2468}
2469
2470
2471/**
2472 * Fetches the next opcode word.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2476 * @param pu16 Where to return the opcode word.
2477 */
2478DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2479{
2480 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 return VINF_SUCCESS;
2490 }
2491 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2492}
2493
2494#else /* IEM_WITH_SETJMP */
2495
2496/**
2497 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2498 *
2499 * @returns The opcode word.
2500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2501 */
2502DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2503{
2504# ifdef IEM_WITH_CODE_TLB
2505 uint16_t u16;
2506 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2507 return u16;
2508# else
2509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2510 if (rcStrict == VINF_SUCCESS)
2511 {
2512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offOpcode += 2;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 }
2520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2521# endif
2522}
2523
2524
2525/**
2526 * Fetches the next opcode word, longjmp on error.
2527 *
2528 * @returns The opcode word.
2529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2530 */
2531DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2532{
2533# ifdef IEM_WITH_CODE_TLB
2534 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2535 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2536 if (RT_LIKELY( pbBuf != NULL
2537 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2538 {
2539 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2541 return *(uint16_t const *)&pbBuf[offBuf];
2542# else
2543 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2544# endif
2545 }
2546# else
2547 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2548 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2549 {
2550 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2555# endif
2556 }
2557# endif
2558 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2559}
2560
2561#endif /* IEM_WITH_SETJMP */
2562
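/*
 * Note on the two code paths above (orientation only): the opcode stream is
 * little endian, so the byte-wise path assembles the value as
 * RT_MAKE_U16(abOpcode[off], abOpcode[off + 1]), i.e. the bytes 0x34 0x12
 * decode to 0x1234.  The IEM_USE_UNALIGNED_DATA_ACCESS path instead reads
 * the word directly, which presumes a little endian host that tolerates
 * misaligned accesses (it is typically only enabled on x86/AMD64 hosts).
 */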
2563
2564/**
2565 * Fetches the next opcode word, returns automatically on failure.
2566 *
2567 * @param a_pu16 Where to return the opcode word.
2568 * @remark Implicitly references pVCpu.
2569 */
2570#ifndef IEM_WITH_SETJMP
2571# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2572 do \
2573 { \
2574 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2575 if (rcStrict2 != VINF_SUCCESS) \
2576 return rcStrict2; \
2577 } while (0)
2578#else
2579# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2580#endif
2581
2582#ifndef IEM_WITH_SETJMP
2583
2584/**
2585 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2586 *
2587 * @returns Strict VBox status code.
2588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2589 * @param pu32 Where to return the opcode double word.
2590 */
2591DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2592{
2593 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2594 if (rcStrict == VINF_SUCCESS)
2595 {
2596 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2597 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2598 pVCpu->iem.s.offOpcode = offOpcode + 2;
2599 }
2600 else
2601 *pu32 = 0;
2602 return rcStrict;
2603}
2604
2605
2606/**
2607 * Fetches the next opcode word, zero extending it to a double word.
2608 *
2609 * @returns Strict VBox status code.
2610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2611 * @param pu32 Where to return the opcode double word.
2612 */
2613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2614{
2615 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2616 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2617 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2618
2619 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620 pVCpu->iem.s.offOpcode = offOpcode + 2;
2621 return VINF_SUCCESS;
2622}
2623
2624#endif /* !IEM_WITH_SETJMP */
2625
2626
2627/**
2628 * Fetches the next opcode word and zero extends it to a double word, returns
2629 * automatically on failure.
2630 *
2631 * @param a_pu32 Where to return the opcode double word.
2632 * @remark Implicitly references pVCpu.
2633 */
2634#ifndef IEM_WITH_SETJMP
2635# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2636 do \
2637 { \
2638 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2639 if (rcStrict2 != VINF_SUCCESS) \
2640 return rcStrict2; \
2641 } while (0)
2642#else
2643# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2644#endif
2645
2646#ifndef IEM_WITH_SETJMP
2647
2648/**
2649 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2650 *
2651 * @returns Strict VBox status code.
2652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2653 * @param pu64 Where to return the opcode quad word.
2654 */
2655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2656{
2657 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2658 if (rcStrict == VINF_SUCCESS)
2659 {
2660 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2661 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2662 pVCpu->iem.s.offOpcode = offOpcode + 2;
2663 }
2664 else
2665 *pu64 = 0;
2666 return rcStrict;
2667}
2668
2669
2670/**
2671 * Fetches the next opcode word, zero extending it to a quad word.
2672 *
2673 * @returns Strict VBox status code.
2674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2675 * @param pu64 Where to return the opcode quad word.
2676 */
2677DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2678{
2679 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2680 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2681 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2682
2683 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2684 pVCpu->iem.s.offOpcode = offOpcode + 2;
2685 return VINF_SUCCESS;
2686}
2687
2688#endif /* !IEM_WITH_SETJMP */
2689
2690/**
2691 * Fetches the next opcode word and zero extends it to a quad word, returns
2692 * automatically on failure.
2693 *
2694 * @param a_pu64 Where to return the opcode quad word.
2695 * @remark Implicitly references pVCpu.
2696 */
2697#ifndef IEM_WITH_SETJMP
2698# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2699 do \
2700 { \
2701 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2702 if (rcStrict2 != VINF_SUCCESS) \
2703 return rcStrict2; \
2704 } while (0)
2705#else
2706# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2707#endif
2708
2709
2710#ifndef IEM_WITH_SETJMP
2711/**
2712 * Fetches the next signed word from the opcode stream.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pi16 Where to return the signed word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2719{
2720 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2721}
2722#endif /* !IEM_WITH_SETJMP */
2723
2724
2725/**
2726 * Fetches the next signed word from the opcode stream, returning automatically
2727 * on failure.
2728 *
2729 * @param a_pi16 Where to return the signed word.
2730 * @remark Implicitly references pVCpu.
2731 */
2732#ifndef IEM_WITH_SETJMP
2733# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2734 do \
2735 { \
2736 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2737 if (rcStrict2 != VINF_SUCCESS) \
2738 return rcStrict2; \
2739 } while (0)
2740#else
2741# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2742#endif
2743
2744#ifndef IEM_WITH_SETJMP
2745
2746/**
2747 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2748 *
2749 * @returns Strict VBox status code.
2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2751 * @param pu32 Where to return the opcode dword.
2752 */
2753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2754{
2755 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2759# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2760 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2761# else
2762 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2763 pVCpu->iem.s.abOpcode[offOpcode + 1],
2764 pVCpu->iem.s.abOpcode[offOpcode + 2],
2765 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2766# endif
2767 pVCpu->iem.s.offOpcode = offOpcode + 4;
2768 }
2769 else
2770 *pu32 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode dword.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu32 Where to return the opcode double word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2783{
2784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2786 {
2787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2789 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2790# else
2791 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2792 pVCpu->iem.s.abOpcode[offOpcode + 1],
2793 pVCpu->iem.s.abOpcode[offOpcode + 2],
2794 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2795# endif
2796 return VINF_SUCCESS;
2797 }
2798 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2799}
2800
2801#else /* IEM_WITH_SETJMP */
2802
2803/**
2804 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2805 *
2806 * @returns The opcode dword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uint32_t u32;
2813 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2814 return u32;
2815# else
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820 pVCpu->iem.s.offOpcode = offOpcode + 4;
2821# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2822 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2823# else
2824 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2825 pVCpu->iem.s.abOpcode[offOpcode + 1],
2826 pVCpu->iem.s.abOpcode[offOpcode + 2],
2827 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2828# endif
2829 }
2830 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2831# endif
2832}
2833
2834
2835/**
2836 * Fetches the next opcode dword, longjmp on error.
2837 *
2838 * @returns The opcode dword.
2839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2840 */
2841DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2842{
2843# ifdef IEM_WITH_CODE_TLB
2844 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2845 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2846 if (RT_LIKELY( pbBuf != NULL
2847 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2848 {
2849 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pbBuf[offBuf];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2854 pbBuf[offBuf + 1],
2855 pbBuf[offBuf + 2],
2856 pbBuf[offBuf + 3]);
2857# endif
2858 }
2859# else
2860 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2861 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2862 {
2863 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 }
2873# endif
2874 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2875}
2876
2877#endif /* IEM_WITH_SETJMP */
2878
2879
2880/**
2881 * Fetches the next opcode dword, returns automatically on failure.
2882 *
2883 * @param a_pu32 Where to return the opcode dword.
2884 * @remark Implicitly references pVCpu.
2885 */
2886#ifndef IEM_WITH_SETJMP
2887# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2888 do \
2889 { \
2890 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2891 if (rcStrict2 != VINF_SUCCESS) \
2892 return rcStrict2; \
2893 } while (0)
2894#else
2895# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2896#endif
2897
2898#ifndef IEM_WITH_SETJMP
2899
2900/**
2901 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 * @param pu64 Where to return the opcode quad word.
2906 */
2907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2908{
2909 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2910 if (rcStrict == VINF_SUCCESS)
2911 {
2912 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2913 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2914 pVCpu->iem.s.abOpcode[offOpcode + 1],
2915 pVCpu->iem.s.abOpcode[offOpcode + 2],
2916 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2917 pVCpu->iem.s.offOpcode = offOpcode + 4;
2918 }
2919 else
2920 *pu64 = 0;
2921 return rcStrict;
2922}
2923
2924
2925/**
2926 * Fetches the next opcode dword, zero extending it to a quad word.
2927 *
2928 * @returns Strict VBox status code.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 * @param pu64 Where to return the opcode quad word.
2931 */
2932DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2933{
2934 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2935 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2936 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2937
2938 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2939 pVCpu->iem.s.abOpcode[offOpcode + 1],
2940 pVCpu->iem.s.abOpcode[offOpcode + 2],
2941 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2942 pVCpu->iem.s.offOpcode = offOpcode + 4;
2943 return VINF_SUCCESS;
2944}
2945
2946#endif /* !IEM_WITH_SETJMP */
2947
2948
2949/**
2950 * Fetches the next opcode dword and zero extends it to a quad word, returns
2951 * automatically on failure.
2952 *
2953 * @param a_pu64 Where to return the opcode quad word.
2954 * @remark Implicitly references pVCpu.
2955 */
2956#ifndef IEM_WITH_SETJMP
2957# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2958 do \
2959 { \
2960 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2961 if (rcStrict2 != VINF_SUCCESS) \
2962 return rcStrict2; \
2963 } while (0)
2964#else
2965# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2966#endif
2967
2968
2969#ifndef IEM_WITH_SETJMP
2970/**
2971 * Fetches the next signed double word from the opcode stream.
2972 *
2973 * @returns Strict VBox status code.
2974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2975 * @param pi32 Where to return the signed double word.
2976 */
2977DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2978{
2979 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2980}
2981#endif /* !IEM_WITH_SETJMP */
2982
2983/**
2984 * Fetches the next signed double word from the opcode stream, returning
2985 * automatically on failure.
2986 *
2987 * @param a_pi32 Where to return the signed double word.
2988 * @remark Implicitly references pVCpu.
2989 */
2990#ifndef IEM_WITH_SETJMP
2991# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2992 do \
2993 { \
2994 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2995 if (rcStrict2 != VINF_SUCCESS) \
2996 return rcStrict2; \
2997 } while (0)
2998#else
2999# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3000#endif
3001
3002#ifndef IEM_WITH_SETJMP
3003
3004/**
3005 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3006 *
3007 * @returns Strict VBox status code.
3008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3009 * @param pu64 Where to return the opcode qword.
3010 */
3011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3012{
3013 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3017 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3018 pVCpu->iem.s.abOpcode[offOpcode + 1],
3019 pVCpu->iem.s.abOpcode[offOpcode + 2],
3020 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3021 pVCpu->iem.s.offOpcode = offOpcode + 4;
3022 }
3023 else
3024 *pu64 = 0;
3025 return rcStrict;
3026}
3027
3028
3029/**
3030 * Fetches the next opcode dword, sign extending it into a quad word.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3034 * @param pu64 Where to return the opcode quad word.
3035 */
3036DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3037{
3038 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3039 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3040 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3041
3042 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3043 pVCpu->iem.s.abOpcode[offOpcode + 1],
3044 pVCpu->iem.s.abOpcode[offOpcode + 2],
3045 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3046 *pu64 = i32;
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode double word and sign extends it to a quad word,
3056 * returns automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
3072
3073#ifndef IEM_WITH_SETJMP
3074
3075/**
3076 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pu64 Where to return the opcode qword.
3081 */
3082DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3083{
3084 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3085 if (rcStrict == VINF_SUCCESS)
3086 {
3087 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3092 pVCpu->iem.s.abOpcode[offOpcode + 1],
3093 pVCpu->iem.s.abOpcode[offOpcode + 2],
3094 pVCpu->iem.s.abOpcode[offOpcode + 3],
3095 pVCpu->iem.s.abOpcode[offOpcode + 4],
3096 pVCpu->iem.s.abOpcode[offOpcode + 5],
3097 pVCpu->iem.s.abOpcode[offOpcode + 6],
3098 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3099# endif
3100 pVCpu->iem.s.offOpcode = offOpcode + 8;
3101 }
3102 else
3103 *pu64 = 0;
3104 return rcStrict;
3105}
3106
3107
3108/**
3109 * Fetches the next opcode qword.
3110 *
3111 * @returns Strict VBox status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param pu64 Where to return the opcode qword.
3114 */
3115DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3116{
3117 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3118 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3119 {
3120# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3121 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3122# else
3123 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3124 pVCpu->iem.s.abOpcode[offOpcode + 1],
3125 pVCpu->iem.s.abOpcode[offOpcode + 2],
3126 pVCpu->iem.s.abOpcode[offOpcode + 3],
3127 pVCpu->iem.s.abOpcode[offOpcode + 4],
3128 pVCpu->iem.s.abOpcode[offOpcode + 5],
3129 pVCpu->iem.s.abOpcode[offOpcode + 6],
3130 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3131# endif
3132 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3133 return VINF_SUCCESS;
3134 }
3135 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3136}
3137
3138#else /* IEM_WITH_SETJMP */
3139
3140/**
3141 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3142 *
3143 * @returns The opcode qword.
3144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3145 */
3146DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3147{
3148# ifdef IEM_WITH_CODE_TLB
3149 uint64_t u64;
3150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3151 return u64;
3152# else
3153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3154 if (rcStrict == VINF_SUCCESS)
3155 {
3156 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3157 pVCpu->iem.s.offOpcode = offOpcode + 8;
3158# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3159 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3160# else
3161 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3162 pVCpu->iem.s.abOpcode[offOpcode + 1],
3163 pVCpu->iem.s.abOpcode[offOpcode + 2],
3164 pVCpu->iem.s.abOpcode[offOpcode + 3],
3165 pVCpu->iem.s.abOpcode[offOpcode + 4],
3166 pVCpu->iem.s.abOpcode[offOpcode + 5],
3167 pVCpu->iem.s.abOpcode[offOpcode + 6],
3168 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3169# endif
3170 }
3171 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3172# endif
3173}
3174
3175
3176/**
3177 * Fetches the next opcode qword, longjmp on error.
3178 *
3179 * @returns The opcode qword.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 */
3182DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3183{
3184# ifdef IEM_WITH_CODE_TLB
3185 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3186 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3187 if (RT_LIKELY( pbBuf != NULL
3188 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3189 {
3190 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3191# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3192 return *(uint64_t const *)&pbBuf[offBuf];
3193# else
3194 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3195 pbBuf[offBuf + 1],
3196 pbBuf[offBuf + 2],
3197 pbBuf[offBuf + 3],
3198 pbBuf[offBuf + 4],
3199 pbBuf[offBuf + 5],
3200 pbBuf[offBuf + 6],
3201 pbBuf[offBuf + 7]);
3202# endif
3203 }
3204# else
3205 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3206 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3207 {
3208 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3210 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3211# else
3212 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3213 pVCpu->iem.s.abOpcode[offOpcode + 1],
3214 pVCpu->iem.s.abOpcode[offOpcode + 2],
3215 pVCpu->iem.s.abOpcode[offOpcode + 3],
3216 pVCpu->iem.s.abOpcode[offOpcode + 4],
3217 pVCpu->iem.s.abOpcode[offOpcode + 5],
3218 pVCpu->iem.s.abOpcode[offOpcode + 6],
3219 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3220# endif
3221 }
3222# endif
3223 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3224}
3225
3226#endif /* IEM_WITH_SETJMP */
3227
3228/**
3229 * Fetches the next opcode quad word, returns automatically on failure.
3230 *
3231 * @param a_pu64 Where to return the opcode quad word.
3232 * @remark Implicitly references pVCpu.
3233 */
3234#ifndef IEM_WITH_SETJMP
3235# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3236 do \
3237 { \
3238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3239 if (rcStrict2 != VINF_SUCCESS) \
3240 return rcStrict2; \
3241 } while (0)
3242#else
3243# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3244#endif
3245
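/*
 * A minimal usage sketch for the opcode fetch macros, assuming the usual
 * decoder context: pVCpu is in scope and, in the non-setjmp build, the
 * enclosing function returns VBOXSTRICTRC since the macro itself returns the
 * failure status.  The 8-byte immediate is purely illustrative:
 *
 *      uint64_t uImm64;
 *      IEM_OPCODE_GET_NEXT_U64(&uImm64);
 *
 * This fetches eight opcode bytes and advances the opcode pointer; in the
 * setjmp build a fetch failure longjmps to the IEM jump buffer instead of
 * returning a status code.
 */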
3246
3247/** @name Misc Worker Functions.
3248 * @{
3249 */
3250
3251/**
3252 * Gets the exception class for the specified exception vector.
3253 *
3254 * @returns The class of the specified exception.
3255 * @param uVector The exception vector.
3256 */
3257IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3258{
3259 Assert(uVector <= X86_XCPT_LAST);
3260 switch (uVector)
3261 {
3262 case X86_XCPT_DE:
3263 case X86_XCPT_TS:
3264 case X86_XCPT_NP:
3265 case X86_XCPT_SS:
3266 case X86_XCPT_GP:
3267 case X86_XCPT_SX: /* AMD only */
3268 return IEMXCPTCLASS_CONTRIBUTORY;
3269
3270 case X86_XCPT_PF:
3271 case X86_XCPT_VE: /* Intel only */
3272 return IEMXCPTCLASS_PAGE_FAULT;
3273
3274 case X86_XCPT_DF:
3275 return IEMXCPTCLASS_DOUBLE_FAULT;
3276 }
3277 return IEMXCPTCLASS_BENIGN;
3278}
3279
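/*
 * For instance, per the mapping above:
 *      iemGetXcptClass(X86_XCPT_GP)  -> IEMXCPTCLASS_CONTRIBUTORY
 *      iemGetXcptClass(X86_XCPT_PF)  -> IEMXCPTCLASS_PAGE_FAULT
 *      iemGetXcptClass(X86_XCPT_NMI) -> IEMXCPTCLASS_BENIGN
 * These classes drive the #DF / triple fault decisions made by
 * IEMEvaluateRecursiveXcpt below.
 */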
3280
3281/**
3282 * Evaluates how to handle an exception caused during delivery of another event
3283 * (exception / interrupt).
3284 *
3285 * @returns How to handle the recursive exception.
3286 * @param pVCpu The cross context virtual CPU structure of the
3287 * calling thread.
3288 * @param fPrevFlags The flags of the previous event.
3289 * @param uPrevVector The vector of the previous event.
3290 * @param fCurFlags The flags of the current exception.
3291 * @param uCurVector The vector of the current exception.
3292 * @param pfXcptRaiseInfo Where to store additional information about the
3293 * exception condition. Optional.
3294 */
3295VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3296 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3297{
3298 /*
3299 * Only CPU exceptions can be raised while delivering other events; software interrupt
3300 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3301 */
3302 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3303 Assert(pVCpu); RT_NOREF(pVCpu);
3304 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3305
3306 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3307 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3308 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3309 {
3310 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3311 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3312 {
3313 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3314 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3315 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3316 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3317 {
3318 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3319 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3320 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3321 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3322 uCurVector, pVCpu->cpum.GstCtx.cr2));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3326 {
3327 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3329 }
3330 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3331 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3332 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3333 {
3334 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3335 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3336 }
3337 }
3338 else
3339 {
3340 if (uPrevVector == X86_XCPT_NMI)
3341 {
3342 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3343 if (uCurVector == X86_XCPT_PF)
3344 {
3345 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3346 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3347 }
3348 }
3349 else if ( uPrevVector == X86_XCPT_AC
3350 && uCurVector == X86_XCPT_AC)
3351 {
3352 enmRaise = IEMXCPTRAISE_CPU_HANG;
3353 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3354 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3355 }
3356 }
3357 }
3358 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3359 {
3360 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3361 if (uCurVector == X86_XCPT_PF)
3362 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3363 }
3364 else
3365 {
3366 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3367 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3368 }
3369
3370 if (pfXcptRaiseInfo)
3371 *pfXcptRaiseInfo = fRaiseInfo;
3372 return enmRaise;
3373}
3374
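/*
 * An illustrative call (the scenario and variable names are hypothetical): a
 * caller that raised a #GP while delivering a #PF would ask the helper above
 * what to do roughly like this:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *
 * Here enmRaise comes back as IEMXCPTRAISE_DOUBLE_FAULT and fRaiseInfo as
 * IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT, since a contributory exception during
 * page-fault delivery escalates to #DF.
 */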
3375
3376/**
3377 * Enters the CPU shutdown state initiated by a triple fault or other
3378 * unrecoverable conditions.
3379 *
3380 * @returns Strict VBox status code.
3381 * @param pVCpu The cross context virtual CPU structure of the
3382 * calling thread.
3383 */
3384IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3385{
3386 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3387 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3388
3389 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3390 {
3391 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3392 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3393 }
3394
3395 RT_NOREF(pVCpu);
3396 return VINF_EM_TRIPLE_FAULT;
3397}
3398
3399
3400/**
3401 * Validates a new SS segment.
3402 *
3403 * @returns VBox strict status code.
3404 * @param pVCpu The cross context virtual CPU structure of the
3405 * calling thread.
3406 * @param NewSS The new SS selector.
3407 * @param uCpl The CPL to load the stack for.
3408 * @param pDesc Where to return the descriptor.
3409 */
3410IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3411{
3412 /* Null selectors are not allowed (we're not called for dispatching
3413 interrupts with SS=0 in long mode). */
3414 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3417 return iemRaiseTaskSwitchFault0(pVCpu);
3418 }
3419
3420 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3421 if ((NewSS & X86_SEL_RPL) != uCpl)
3422 {
3423 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3424 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3425 }
3426
3427 /*
3428 * Read the descriptor.
3429 */
3430 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433
3434 /*
3435 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3436 */
3437 if (!pDesc->Legacy.Gen.u1DescType)
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442
3443 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3444 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3445 {
3446 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3447 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3448 }
3449 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3450 {
3451 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3452 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3453 }
3454
3455 /* Is it there? */
3456 /** @todo testcase: Is this checked before the canonical / limit check below? */
3457 if (!pDesc->Legacy.Gen.u1Present)
3458 {
3459 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3460 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3461 }
3462
3463 return VINF_SUCCESS;
3464}
3465
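/*
 * A minimal usage sketch (the variable names are illustrative): a stack switch
 * would typically validate the incoming selector before touching SS:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * On failure the appropriate #TS/#NP has already been raised and the strict
 * status code just needs propagating; on success DescSS holds the descriptor
 * for committing SS and marking it accessed.
 */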
3466
3467/**
3468 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3469 * not (kind of obsolete now).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 */
3473#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3474
3475/**
3476 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3477 *
3478 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3479 * @param a_fEfl The new EFLAGS.
3480 */
3481#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3482
3483/** @} */
3484
3485
3486/** @name Raising Exceptions.
3487 *
3488 * @{
3489 */
3490
3491
3492/**
3493 * Loads the specified stack far pointer from the TSS.
3494 *
3495 * @returns VBox strict status code.
3496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3497 * @param uCpl The CPL to load the stack for.
3498 * @param pSelSS Where to return the new stack segment.
3499 * @param puEsp Where to return the new stack pointer.
3500 */
3501IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3502{
3503 VBOXSTRICTRC rcStrict;
3504 Assert(uCpl < 4);
3505
3506 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3507 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3508 {
3509 /*
3510 * 16-bit TSS (X86TSS16).
3511 */
3512 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3513 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3514 {
3515 uint32_t off = uCpl * 4 + 2;
3516 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3517 {
3518 /** @todo check actual access pattern here. */
3519 uint32_t u32Tmp = 0; /* gcc maybe... */
3520 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3521 if (rcStrict == VINF_SUCCESS)
3522 {
3523 *puEsp = RT_LOWORD(u32Tmp);
3524 *pSelSS = RT_HIWORD(u32Tmp);
3525 return VINF_SUCCESS;
3526 }
3527 }
3528 else
3529 {
3530 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3531 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3532 }
3533 break;
3534 }
3535
3536 /*
3537 * 32-bit TSS (X86TSS32).
3538 */
3539 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3540 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3541 {
3542 uint32_t off = uCpl * 8 + 4;
3543 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3544 {
3545/** @todo check actual access pattern here. */
3546 uint64_t u64Tmp;
3547 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3548 if (rcStrict == VINF_SUCCESS)
3549 {
3550 *puEsp = u64Tmp & UINT32_MAX;
3551 *pSelSS = (RTSEL)(u64Tmp >> 32);
3552 return VINF_SUCCESS;
3553 }
3554 }
3555 else
3556 {
3557 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3558 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3559 }
3560 break;
3561 }
3562
3563 default:
3564 AssertFailed();
3565 rcStrict = VERR_IEM_IPE_4;
3566 break;
3567 }
3568
3569 *puEsp = 0; /* make gcc happy */
3570 *pSelSS = 0; /* make gcc happy */
3571 return rcStrict;
3572}
3573
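/*
 * The offset arithmetic above follows directly from the TSS layouts (a worked
 * example rather than new logic): a 16-bit TSS stores the ring stacks as
 * 2-byte sp0,ss0 / sp1,ss1 / sp2,ss2 fields starting at offset 2, giving
 * off = uCpl * 4 + 2, while a 32-bit TSS stores 4-byte esp0,ss0 / esp1,ss1 /
 * esp2,ss2 fields starting at offset 4, giving off = uCpl * 8 + 4.  For
 * uCpl=1 this reads ss1:sp1 at offset 6 (16-bit) or ss1:esp1 at offset 12
 * (32-bit), with the selector in the high half of the fetched value.
 */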
3574
3575/**
3576 * Loads the specified stack pointer from the 64-bit TSS.
3577 *
3578 * @returns VBox strict status code.
3579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3580 * @param uCpl The CPL to load the stack for.
3581 * @param uIst The interrupt stack table index; 0 means the rsp0..rsp2 entry selected by uCpl is used.
3582 * @param puRsp Where to return the new stack pointer.
3583 */
3584IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3585{
3586 Assert(uCpl < 4);
3587 Assert(uIst < 8);
3588 *puRsp = 0; /* make gcc happy */
3589
3590 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3591 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3592
3593 uint32_t off;
3594 if (uIst)
3595 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3596 else
3597 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3598 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3599 {
3600 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3601 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3602 }
3603
3604 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3605}
3606
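/*
 * Worked offsets for the 64-bit TSS lookup above: rsp0 sits at offset 4 with
 * the RSPn entries 8 bytes apart, hence off = uCpl * 8 + 4 when uIst is zero,
 * and the IST array starts at ist1 (offset 36), hence off = (uIst - 1) * 8 + 36
 * otherwise.  E.g. uIst=3 fetches the 8 bytes at offset 52 (ist3).
 */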
3607
3608/**
3609 * Adjust the CPU state according to the exception being raised.
3610 *
3611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3612 * @param u8Vector The exception that has been raised.
3613 */
3614DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3615{
3616 switch (u8Vector)
3617 {
3618 case X86_XCPT_DB:
3619 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3620 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3621 break;
3622 /** @todo Read the AMD and Intel exception reference... */
3623 }
3624}
3625
3626
3627/**
3628 * Implements exceptions and interrupts for real mode.
3629 *
3630 * @returns VBox strict status code.
3631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3632 * @param cbInstr The number of bytes to offset rIP by in the return
3633 * address.
3634 * @param u8Vector The interrupt / exception vector number.
3635 * @param fFlags The flags.
3636 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3637 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3638 */
3639IEM_STATIC VBOXSTRICTRC
3640iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3641 uint8_t cbInstr,
3642 uint8_t u8Vector,
3643 uint32_t fFlags,
3644 uint16_t uErr,
3645 uint64_t uCr2)
3646{
3647 NOREF(uErr); NOREF(uCr2);
3648 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3649
3650 /*
3651 * Read the IDT entry.
3652 */
3653 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3654 {
3655 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3656 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658 RTFAR16 Idte;
3659 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3660 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3661 {
3662 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /*
3667 * Push the stack frame.
3668 */
3669 uint16_t *pu16Frame;
3670 uint64_t uNewRsp;
3671 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674
3675 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3676#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3677 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3678 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3679 fEfl |= UINT16_C(0xf000);
3680#endif
3681 pu16Frame[2] = (uint16_t)fEfl;
3682 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3683 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3684 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3685 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3686 return rcStrict;
3687
3688 /*
3689 * Load the vector address into cs:ip and make exception specific state
3690 * adjustments.
3691 */
3692 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3693 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3695 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3696 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3697 pVCpu->cpum.GstCtx.rip = Idte.off;
3698 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3699 IEMMISC_SET_EFL(pVCpu, fEfl);
3700
3701 /** @todo do we actually do this in real mode? */
3702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3703 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3704
3705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3706}
3707
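/*
 * A concrete real-mode walk-through (the vector is purely illustrative): for
 * vector 0x08 the IVT entry is the 4-byte far pointer at idtr.pIdt + 0x20,
 * the three words pushed are FLAGS, CS and IP (IP biased by cbInstr for
 * software interrupts), and execution resumes at Idte.sel:Idte.off with
 * CS.base = Idte.sel << 4 and IF, TF and AC cleared.
 */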
3708
3709/**
3710 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3711 *
3712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3713 * @param pSReg Pointer to the segment register.
3714 */
3715IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3716{
3717 pSReg->Sel = 0;
3718 pSReg->ValidSel = 0;
3719 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3720 {
3721 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3722 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3723 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3724 }
3725 else
3726 {
3727 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3728 /** @todo check this on AMD-V */
3729 pSReg->u64Base = 0;
3730 pSReg->u32Limit = 0;
3731 }
3732}
3733
3734
3735/**
3736 * Loads a segment selector during a task switch in V8086 mode.
3737 *
3738 * @param pSReg Pointer to the segment register.
3739 * @param uSel The selector value to load.
3740 */
3741IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3742{
3743 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3744 pSReg->Sel = uSel;
3745 pSReg->ValidSel = uSel;
3746 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3747 pSReg->u64Base = uSel << 4;
3748 pSReg->u32Limit = 0xffff;
3749 pSReg->Attr.u = 0xf3;
3750}
3751
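/*
 * E.g. loading uSel=0x1234 with the helper above yields base 0x12340
 * (uSel << 4), limit 0xffff and attributes 0xf3, i.e. a present, DPL=3,
 * read/write accessed data segment, which is the fixed shape of any segment
 * in virtual-8086 mode.
 */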
3752
3753/**
3754 * Loads a NULL data selector into a selector register, both the hidden and
3755 * visible parts, in protected mode.
3756 *
3757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3758 * @param pSReg Pointer to the segment register.
3759 * @param uRpl The RPL.
3760 */
3761IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3762{
3763 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3764 * data selector in protected mode. */
3765 pSReg->Sel = uRpl;
3766 pSReg->ValidSel = uRpl;
3767 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3768 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3769 {
3770 /* VT-x (Intel 3960x) observed doing something like this. */
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3772 pSReg->u32Limit = UINT32_MAX;
3773 pSReg->u64Base = 0;
3774 }
3775 else
3776 {
3777 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3778 pSReg->u32Limit = 0;
3779 pSReg->u64Base = 0;
3780 }
3781}
3782
3783
3784/**
3785 * Loads a segment selector during a task switch in protected mode.
3786 *
3787 * In this task switch scenario, we would throw \#TS exceptions rather than
3788 * \#GPs.
3789 *
3790 * @returns VBox strict status code.
3791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3792 * @param pSReg Pointer to the segment register.
3793 * @param uSel The new selector value.
3794 *
3795 * @remarks This does _not_ handle CS or SS.
3796 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3797 */
3798IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3799{
3800 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3801
3802 /* Null data selector. */
3803 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3804 {
3805 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3807 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3808 return VINF_SUCCESS;
3809 }
3810
3811 /* Fetch the descriptor. */
3812 IEMSELDESC Desc;
3813 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3814 if (rcStrict != VINF_SUCCESS)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3817 VBOXSTRICTRC_VAL(rcStrict)));
3818 return rcStrict;
3819 }
3820
3821 /* Must be a data segment or readable code segment. */
3822 if ( !Desc.Legacy.Gen.u1DescType
3823 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3824 {
3825 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3826 Desc.Legacy.Gen.u4Type));
3827 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3828 }
3829
3830 /* Check privileges for data segments and non-conforming code segments. */
3831 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3832 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3833 {
3834 /* The RPL and the new CPL must be less than or equal to the DPL. */
3835 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3836 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3839 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842 }
3843
3844 /* Is it there? */
3845 if (!Desc.Legacy.Gen.u1Present)
3846 {
3847 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3848 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3849 }
3850
3851 /* The base and limit. */
3852 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3853 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3854
3855 /*
3856 * Ok, everything checked out fine. Now set the accessed bit before
3857 * committing the result into the registers.
3858 */
3859 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3860 {
3861 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3862 if (rcStrict != VINF_SUCCESS)
3863 return rcStrict;
3864 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3865 }
3866
3867 /* Commit */
3868 pSReg->Sel = uSel;
3869 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3870 pSReg->u32Limit = cbLimit;
3871 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3872 pSReg->ValidSel = uSel;
3873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3874 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3875 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3876
3877 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3879 return VINF_SUCCESS;
3880}
3881
3882
3883/**
3884 * Performs a task switch.
3885 *
3886 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3887 * caller is responsible for performing the necessary checks (like DPL, TSS
3888 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3889 * reference for JMP, CALL, IRET.
3890 *
3891 * If the task switch is due to a software interrupt or hardware exception,
3892 * the caller is responsible for validating the TSS selector and descriptor. See
3893 * Intel Instruction reference for INT n.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param enmTaskSwitch The cause of the task switch.
3898 * @param uNextEip The EIP effective after the task switch.
3899 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3900 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3901 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3902 * @param SelTSS The TSS selector of the new task.
3903 * @param pNewDescTSS Pointer to the new TSS descriptor.
3904 */
3905IEM_STATIC VBOXSTRICTRC
3906iemTaskSwitch(PVMCPUCC pVCpu,
3907 IEMTASKSWITCH enmTaskSwitch,
3908 uint32_t uNextEip,
3909 uint32_t fFlags,
3910 uint16_t uErr,
3911 uint64_t uCr2,
3912 RTSEL SelTSS,
3913 PIEMSELDESC pNewDescTSS)
3914{
3915 Assert(!IEM_IS_REAL_MODE(pVCpu));
3916 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3917 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3918
3919 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3920 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3921 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3922 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3923 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3924
3925 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3926 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3927
3928 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3929 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3930
3931 /* Update CR2 in case it's a page-fault. */
3932 /** @todo This should probably be done much earlier in IEM/PGM. See
3933 * @bugref{5653#c49}. */
3934 if (fFlags & IEM_XCPT_FLAGS_CR2)
3935 pVCpu->cpum.GstCtx.cr2 = uCr2;
3936
3937 /*
3938 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3939 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3940 */
3941 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3942 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3943 if (uNewTSSLimit < uNewTSSLimitMin)
3944 {
3945 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3946 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3947 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3948 }
3949
3950 /*
3951 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3952 * The new TSS must have been read and validated (DPL, limits etc.) before a
3953 * task-switch VM-exit commences.
3954 *
3955 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3956 */
3957 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3958 {
3959 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3960 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3961 }
3962
3963 /*
3964 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3965 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3966 */
3967 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3968 {
3969 uint32_t const uExitInfo1 = SelTSS;
3970 uint32_t uExitInfo2 = uErr;
3971 switch (enmTaskSwitch)
3972 {
3973 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3974 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3975 default: break;
3976 }
3977 if (fFlags & IEM_XCPT_FLAGS_ERR)
3978 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3979 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3980 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3981
3982 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3983 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3984 RT_NOREF2(uExitInfo1, uExitInfo2);
3985 }
3986
3987 /*
3988 * Check the current TSS limit. The last field written to the current TSS during the
3989 * task switch is the 2 bytes at offset 0x5C (32-bit) or the 2 bytes at offset 0x28 (16-bit).
3990 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3991 *
3992 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3993 * end up with smaller than "legal" TSS limits.
3994 */
3995 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3996 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3997 if (uCurTSSLimit < uCurTSSLimitMin)
3998 {
3999 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4000 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4001 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4002 }
4003
4004 /*
4005 * Verify that the new TSS can be accessed and map it. Map only the required contents
4006 * and not the entire TSS.
4007 */
4008 void *pvNewTSS;
4009 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4010 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4011 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4012 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4013 * not perform correct translation if this happens. See Intel spec. 7.2.1
4014 * "Task-State Segment". */
4015 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4016 if (rcStrict != VINF_SUCCESS)
4017 {
4018 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4019 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4020 return rcStrict;
4021 }
4022
4023 /*
4024 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4025 */
4026 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4027 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4028 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4029 {
4030 PX86DESC pDescCurTSS;
4031 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4032 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4036 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4041 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4042 if (rcStrict != VINF_SUCCESS)
4043 {
4044 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4045 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4046 return rcStrict;
4047 }
4048
4049 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4050 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4051 {
4052 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4053 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4054 u32EFlags &= ~X86_EFL_NT;
4055 }
4056 }
4057
4058 /*
4059 * Save the CPU state into the current TSS.
4060 */
4061 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4062 if (GCPtrNewTSS == GCPtrCurTSS)
4063 {
4064 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4065 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4066 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4067 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4068 pVCpu->cpum.GstCtx.ldtr.Sel));
4069 }
4070 if (fIsNewTSS386)
4071 {
4072 /*
4073 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4074 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4075 */
4076 void *pvCurTSS32;
4077 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4078 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4079 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4080 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4081 if (rcStrict != VINF_SUCCESS)
4082 {
4083 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4084 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4085 return rcStrict;
4086 }
4087
4088 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4089 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4090 pCurTSS32->eip = uNextEip;
4091 pCurTSS32->eflags = u32EFlags;
4092 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4093 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4094 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4095 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4096 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4097 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4098 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4099 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4100 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4101 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4102 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4103 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4104 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4105 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4106
4107 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4108 if (rcStrict != VINF_SUCCESS)
4109 {
4110 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4111 VBOXSTRICTRC_VAL(rcStrict)));
4112 return rcStrict;
4113 }
4114 }
4115 else
4116 {
4117 /*
4118 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4119 */
4120 void *pvCurTSS16;
4121 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4122 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4123 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4124 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4125 if (rcStrict != VINF_SUCCESS)
4126 {
4127 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4128 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4129 return rcStrict;
4130 }
4131
4132 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4133 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4134 pCurTSS16->ip = uNextEip;
4135 pCurTSS16->flags = u32EFlags;
4136 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4137 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4138 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4139 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4140 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4141 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4142 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4143 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4144 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4145 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4146 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4147 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4148
4149 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4150 if (rcStrict != VINF_SUCCESS)
4151 {
4152 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4153 VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156 }
4157
4158 /*
4159 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4160 */
4161 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4162 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4163 {
4164 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4165 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4166 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4167 }
4168
4169 /*
4170 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4171 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4172 */
4173 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4174 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4175 bool fNewDebugTrap;
4176 if (fIsNewTSS386)
4177 {
4178 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4179 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4180 uNewEip = pNewTSS32->eip;
4181 uNewEflags = pNewTSS32->eflags;
4182 uNewEax = pNewTSS32->eax;
4183 uNewEcx = pNewTSS32->ecx;
4184 uNewEdx = pNewTSS32->edx;
4185 uNewEbx = pNewTSS32->ebx;
4186 uNewEsp = pNewTSS32->esp;
4187 uNewEbp = pNewTSS32->ebp;
4188 uNewEsi = pNewTSS32->esi;
4189 uNewEdi = pNewTSS32->edi;
4190 uNewES = pNewTSS32->es;
4191 uNewCS = pNewTSS32->cs;
4192 uNewSS = pNewTSS32->ss;
4193 uNewDS = pNewTSS32->ds;
4194 uNewFS = pNewTSS32->fs;
4195 uNewGS = pNewTSS32->gs;
4196 uNewLdt = pNewTSS32->selLdt;
4197 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4198 }
4199 else
4200 {
4201 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4202 uNewCr3 = 0;
4203 uNewEip = pNewTSS16->ip;
4204 uNewEflags = pNewTSS16->flags;
4205 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4206 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4207 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4208 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4209 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4210 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4211 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4212 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4213 uNewES = pNewTSS16->es;
4214 uNewCS = pNewTSS16->cs;
4215 uNewSS = pNewTSS16->ss;
4216 uNewDS = pNewTSS16->ds;
4217 uNewFS = 0;
4218 uNewGS = 0;
4219 uNewLdt = pNewTSS16->selLdt;
4220 fNewDebugTrap = false;
4221 }
4222
4223 if (GCPtrNewTSS == GCPtrCurTSS)
4224 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4225 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4226
4227 /*
4228 * We're done accessing the new TSS.
4229 */
4230 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4231 if (rcStrict != VINF_SUCCESS)
4232 {
4233 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4234 return rcStrict;
4235 }
4236
4237 /*
4238 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4239 */
4240 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4241 {
4242 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4243 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4244 if (rcStrict != VINF_SUCCESS)
4245 {
4246 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4247 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4248 return rcStrict;
4249 }
4250
4251 /* Check that the descriptor indicates the new TSS is available (not busy). */
4252 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4253 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4254 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4255
4256 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4261 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264 }
4265
4266 /*
4267 * From this point on, we're technically in the new task. We will defer exceptions
4268 * until the completion of the task switch but before executing any instructions in the new task.
4269 */
4270 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4271 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4272 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4273 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4274 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4275 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4276 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4277
4278 /* Set the busy bit in TR. */
4279 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4280
4281 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4282 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4283 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4284 {
4285 uNewEflags |= X86_EFL_NT;
4286 }
4287
4288 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4289 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4291
4292 pVCpu->cpum.GstCtx.eip = uNewEip;
4293 pVCpu->cpum.GstCtx.eax = uNewEax;
4294 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4295 pVCpu->cpum.GstCtx.edx = uNewEdx;
4296 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4297 pVCpu->cpum.GstCtx.esp = uNewEsp;
4298 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4299 pVCpu->cpum.GstCtx.esi = uNewEsi;
4300 pVCpu->cpum.GstCtx.edi = uNewEdi;
4301
4302 uNewEflags &= X86_EFL_LIVE_MASK;
4303 uNewEflags |= X86_EFL_RA1_MASK;
4304 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4305
4306 /*
4307 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4308 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4309 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4310 */
4311 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4312 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4313
4314 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4315 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4316
4317 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4318 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4319
4320 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4321 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4322
4323 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4324 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4325
4326 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4327 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4328 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4329
4330 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4331 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4332 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4333 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4334
4335 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4336 {
4337 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4338 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4339 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4340 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4341 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4342 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4343 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4344 }
4345
4346 /*
4347 * Switch CR3 for the new task.
4348 */
4349 if ( fIsNewTSS386
4350 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4351 {
4352 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4353 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4354 AssertRCSuccessReturn(rc, rc);
4355
4356 /* Inform PGM. */
4357 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4358 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
4359 AssertRCReturn(rc, rc);
4360 /* ignore informational status codes */
4361
4362 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4363 }
4364
4365 /*
4366 * Switch LDTR for the new task.
4367 */
4368 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4369 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4370 else
4371 {
4372 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4373
4374 IEMSELDESC DescNewLdt;
4375 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4376 if (rcStrict != VINF_SUCCESS)
4377 {
4378 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4379 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4380 return rcStrict;
4381 }
4382 if ( !DescNewLdt.Legacy.Gen.u1Present
4383 || DescNewLdt.Legacy.Gen.u1DescType
4384 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4385 {
4386 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4387 uNewLdt, DescNewLdt.Legacy.u));
4388 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4389 }
4390
4391 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4392 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4393 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4394 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4395 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4396 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4397 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4399 }
4400
4401 IEMSELDESC DescSS;
4402 if (IEM_IS_V86_MODE(pVCpu))
4403 {
4404 pVCpu->iem.s.uCpl = 3;
4405 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4406 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4407 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4408 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4409 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4410 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4411
4412 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4413 DescSS.Legacy.u = 0;
4414 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4415 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4416 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4417 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4418 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4419 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4420 DescSS.Legacy.Gen.u2Dpl = 3;
4421 }
4422 else
4423 {
4424 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4425
4426 /*
4427 * Load the stack segment for the new task.
4428 */
4429 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4430 {
4431 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4432 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4433 }
4434
4435 /* Fetch the descriptor. */
4436 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4437 if (rcStrict != VINF_SUCCESS)
4438 {
4439 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4440 VBOXSTRICTRC_VAL(rcStrict)));
4441 return rcStrict;
4442 }
4443
4444 /* SS must be a data segment and writable. */
4445 if ( !DescSS.Legacy.Gen.u1DescType
4446 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4447 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4448 {
4449 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4450 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4451 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4455 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4456 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4457 {
4458 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4459 uNewCpl));
4460 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4461 }
4462
4463 /* Is it there? */
4464 if (!DescSS.Legacy.Gen.u1Present)
4465 {
4466 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4467 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4468 }
4469
4470 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4471 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4472
4473 /* Set the accessed bit before committing the result into SS. */
4474 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4475 {
4476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4477 if (rcStrict != VINF_SUCCESS)
4478 return rcStrict;
4479 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4480 }
4481
4482 /* Commit SS. */
4483 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4484 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4485 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4486 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4487 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4488 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4489 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4490
4491 /* CPL has changed, update IEM before loading rest of segments. */
4492 pVCpu->iem.s.uCpl = uNewCpl;
4493
4494 /*
4495 * Load the data segments for the new task.
4496 */
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4501 if (rcStrict != VINF_SUCCESS)
4502 return rcStrict;
4503 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4504 if (rcStrict != VINF_SUCCESS)
4505 return rcStrict;
4506 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4507 if (rcStrict != VINF_SUCCESS)
4508 return rcStrict;
4509
4510 /*
4511 * Load the code segment for the new task.
4512 */
4513 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4514 {
4515 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4516 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4517 }
4518
4519 /* Fetch the descriptor. */
4520 IEMSELDESC DescCS;
4521 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4522 if (rcStrict != VINF_SUCCESS)
4523 {
4524 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4525 return rcStrict;
4526 }
4527
4528 /* CS must be a code segment. */
4529 if ( !DescCS.Legacy.Gen.u1DescType
4530 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4531 {
4532 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4533 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* For conforming CS, DPL must be less than or equal to the RPL. */
4538 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4539 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4540 {
4541        Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4542 DescCS.Legacy.Gen.u2Dpl));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* For non-conforming CS, DPL must match RPL. */
4547 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4548 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4549 {
4550        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4551 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4552 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 /* Is it there? */
4556 if (!DescCS.Legacy.Gen.u1Present)
4557 {
4558 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4559 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4563 u64Base = X86DESC_BASE(&DescCS.Legacy);
4564
4565 /* Set the accessed bit before committing the result into CS. */
4566 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4567 {
4568 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4572 }
4573
4574 /* Commit CS. */
4575 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4576 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4577 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4578 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4579 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4580 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4582 }
4583
4584 /** @todo Debug trap. */
4585 if (fIsNewTSS386 && fNewDebugTrap)
4586 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4587
4588 /*
4589 * Construct the error code masks based on what caused this task switch.
4590 * See Intel Instruction reference for INT.
4591 */
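    /* uExt is used as the external-event (EXT) bit of the #SS/#GP error codes
       raised below: 1 for hardware interrupts/exceptions and ICEBP, 0 for
       software INT n. */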
4592 uint16_t uExt;
4593 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4594 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4595 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4596 {
4597 uExt = 1;
4598 }
4599 else
4600 uExt = 0;
4601
4602 /*
4603 * Push any error code on to the new stack.
4604 */
4605 if (fFlags & IEM_XCPT_FLAGS_ERR)
4606 {
4607 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4608 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4609 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4610
4611 /* Check that there is sufficient space on the stack. */
4612 /** @todo Factor out segment limit checking for normal/expand down segments
4613 * into a separate function. */
4614 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4615 {
4616 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4617 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4618 {
4619 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4620 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4621 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4622 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4623 }
4624 }
4625 else
4626 {
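            /* Expand-down stack segment: valid offsets lie above the limit,
               i.e. in (cbLimitSS, 0xffff] or (cbLimitSS, 0xffffffff] depending
               on the B (default-big) bit. */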
4627 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4628 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4629 {
4630 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4631 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4632 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4633 }
4634 }
4635
4636
4637 if (fIsNewTSS386)
4638 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4639 else
4640 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4641 if (rcStrict != VINF_SUCCESS)
4642 {
4643 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4644 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4645 return rcStrict;
4646 }
4647 }
4648
4649 /* Check the new EIP against the new CS limit. */
4650 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4651 {
4652        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4653 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4654 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4655 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4656 }
4657
4658 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4659 pVCpu->cpum.GstCtx.ss.Sel));
4660 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4661}
4662
4663
4664/**
4665 * Implements exceptions and interrupts for protected mode.
4666 *
4667 * @returns VBox strict status code.
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param cbInstr The number of bytes to offset rIP by in the return
4670 * address.
4671 * @param u8Vector The interrupt / exception vector number.
4672 * @param fFlags The flags.
4673 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4674 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4675 */
4676IEM_STATIC VBOXSTRICTRC
4677iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4678 uint8_t cbInstr,
4679 uint8_t u8Vector,
4680 uint32_t fFlags,
4681 uint16_t uErr,
4682 uint64_t uCr2)
4683{
4684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4685
4686 /*
4687 * Read the IDT entry.
4688 */
4689 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 X86DESC Idte;
4695 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4696 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4697 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4698 {
4699 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4700 return rcStrict;
4701 }
4702 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4703 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4704 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4705
4706 /*
4707 * Check the descriptor type, DPL and such.
4708 * ASSUMES this is done in the same order as described for call-gate calls.
4709 */
4710 if (Idte.Gate.u1DescType)
4711 {
4712 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4713 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4714 }
4715 bool fTaskGate = false;
4716 uint8_t f32BitGate = true;
4717 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4718 switch (Idte.Gate.u4Type)
4719 {
4720 case X86_SEL_TYPE_SYS_UNDEFINED:
4721 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4722 case X86_SEL_TYPE_SYS_LDT:
4723 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4724 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4725 case X86_SEL_TYPE_SYS_UNDEFINED2:
4726 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4727 case X86_SEL_TYPE_SYS_UNDEFINED3:
4728 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4729 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4730 case X86_SEL_TYPE_SYS_UNDEFINED4:
4731 {
4732 /** @todo check what actually happens when the type is wrong...
4733 * esp. call gates. */
4734 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4736 }
4737
4738 case X86_SEL_TYPE_SYS_286_INT_GATE:
4739 f32BitGate = false;
4740 RT_FALL_THRU();
4741 case X86_SEL_TYPE_SYS_386_INT_GATE:
4742 fEflToClear |= X86_EFL_IF;
4743 break;
4744
4745 case X86_SEL_TYPE_SYS_TASK_GATE:
4746 fTaskGate = true;
4747#ifndef IEM_IMPLEMENTS_TASKSWITCH
4748 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4749#endif
4750 break;
4751
4752 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4753            f32BitGate = false;
                RT_FALL_THRU();
4754 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4755 break;
4756
4757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4758 }
4759
4760 /* Check DPL against CPL if applicable. */
4761 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4762 {
4763 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768 }
4769
4770 /* Is it there? */
4771 if (!Idte.Gate.u1Present)
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4774 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4775 }
4776
4777 /* Is it a task-gate? */
4778 if (fTaskGate)
4779 {
4780 /*
4781 * Construct the error code masks based on what caused this task switch.
4782 * See Intel Instruction reference for INT.
4783 */
4784 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4785 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4786 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4787 RTSEL SelTSS = Idte.Gate.u16Sel;
4788
4789 /*
4790 * Fetch the TSS descriptor in the GDT.
4791 */
4792 IEMSELDESC DescTSS;
4793 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4794 if (rcStrict != VINF_SUCCESS)
4795 {
4796 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4797 VBOXSTRICTRC_VAL(rcStrict)));
4798 return rcStrict;
4799 }
4800
4801 /* The TSS descriptor must be a system segment and be available (not busy). */
4802 if ( DescTSS.Legacy.Gen.u1DescType
4803 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4804 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4807 u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* The TSS must be present. */
4812 if (!DescTSS.Legacy.Gen.u1Present)
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4815 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4816 }
4817
4818 /* Do the actual task switch. */
4819 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4820 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4821 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4822 }
4823
4824 /* A null CS is bad. */
4825 RTSEL NewCS = Idte.Gate.u16Sel;
4826 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4829 return iemRaiseGeneralProtectionFault0(pVCpu);
4830 }
4831
4832 /* Fetch the descriptor for the new CS. */
4833 IEMSELDESC DescCS;
4834 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4835 if (rcStrict != VINF_SUCCESS)
4836 {
4837 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4838 return rcStrict;
4839 }
4840
4841 /* Must be a code segment. */
4842 if (!DescCS.Legacy.Gen.u1DescType)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4845 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4846 }
4847 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4848 {
4849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4850 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4851 }
4852
4853 /* Don't allow lowering the privilege level. */
4854 /** @todo Does the lowering of privileges apply to software interrupts
4855 * only? This has bearings on the more-privileged or
4856 * same-privilege stack behavior further down. A testcase would
4857 * be nice. */
4858 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4861 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4862 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4863 }
4864
4865 /* Make sure the selector is present. */
4866 if (!DescCS.Legacy.Gen.u1Present)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4869 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4870 }
4871
4872 /* Check the new EIP against the new CS limit. */
4873 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4874 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4875 ? Idte.Gate.u16OffsetLow
4876 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4877 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4878 if (uNewEip > cbLimitCS)
4879 {
4880 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4881 u8Vector, uNewEip, cbLimitCS, NewCS));
4882 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4883 }
4884 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4885
4886 /* Calc the flag image to push. */
4887 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4888 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4889 fEfl &= ~X86_EFL_RF;
4890 else
4891 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4892
4893 /* From V8086 mode only go to CPL 0. */
4894 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4895 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4896 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4899 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4900 }
4901
4902 /*
4903 * If the privilege level changes, we need to get a new stack from the TSS.
4904 * This in turns means validating the new SS and ESP...
4905 */
4906 if (uNewCpl != pVCpu->iem.s.uCpl)
4907 {
4908 RTSEL NewSS;
4909 uint32_t uNewEsp;
4910 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913
4914 IEMSELDESC DescSS;
4915 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4919 if (!DescSS.Legacy.Gen.u1DefBig)
4920 {
4921 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4922 uNewEsp = (uint16_t)uNewEsp;
4923 }
4924
4925 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4926
4927 /* Check that there is sufficient space for the stack frame. */
4928 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
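        /* Frame sizes: without V86 the frame is SS:ESP, EFLAGS, CS:EIP (5 entries,
           +1 for an error code); with V86 it additionally holds GS, FS, DS, ES
           (9/10 entries).  Entries are 2 bytes for a 16-bit gate and 4 bytes for
           a 32-bit gate, hence the f32BitGate shift. */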
4929 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4930 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4931 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4932
4933 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4934 {
4935 if ( uNewEsp - 1 > cbLimitSS
4936 || uNewEsp < cbStackFrame)
4937 {
4938 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4939 u8Vector, NewSS, uNewEsp, cbStackFrame));
4940 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4941 }
4942 }
4943 else
4944 {
4945 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4946 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4947 {
4948 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4949 u8Vector, NewSS, uNewEsp, cbStackFrame));
4950 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4951 }
4952 }
4953
4954 /*
4955 * Start making changes.
4956 */
4957
4958 /* Set the new CPL so that stack accesses use it. */
4959 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4960 pVCpu->iem.s.uCpl = uNewCpl;
4961
4962 /* Create the stack frame. */
4963 RTPTRUNION uStackFrame;
4964 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4965 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4966 if (rcStrict != VINF_SUCCESS)
4967 return rcStrict;
4968 void * const pvStackFrame = uStackFrame.pv;
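        /* Frame layout, lowest address first: [error code,] EIP/IP, CS, EFLAGS,
           ESP/SP, SS, and for V86 mode additionally ES, DS, FS, GS. */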
4969 if (f32BitGate)
4970 {
4971 if (fFlags & IEM_XCPT_FLAGS_ERR)
4972 *uStackFrame.pu32++ = uErr;
4973 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4974 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4975 uStackFrame.pu32[2] = fEfl;
4976 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4977 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4978 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4979 if (fEfl & X86_EFL_VM)
4980 {
4981 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4982 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4983 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4984 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4985 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4986 }
4987 }
4988 else
4989 {
4990 if (fFlags & IEM_XCPT_FLAGS_ERR)
4991 *uStackFrame.pu16++ = uErr;
4992 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4993 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4994 uStackFrame.pu16[2] = fEfl;
4995 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4996 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4997 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4998 if (fEfl & X86_EFL_VM)
4999 {
5000 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5001 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5002 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5003 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5004 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5005 }
5006 }
5007 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5008 if (rcStrict != VINF_SUCCESS)
5009 return rcStrict;
5010
5011 /* Mark the selectors 'accessed' (hope this is the correct time). */
5012     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5013 * after pushing the stack frame? (Write protect the gdt + stack to
5014 * find out.) */
5015 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5024 {
5025 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5026 if (rcStrict != VINF_SUCCESS)
5027 return rcStrict;
5028 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5029 }
5030
5031 /*
5032     * Start committing the register changes (joins with the DPL=CPL branch).
5033 */
5034 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5035 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5036 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5037 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5038 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5039 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5040 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5041 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5042 * SP is loaded).
5043 * Need to check the other combinations too:
5044 * - 16-bit TSS, 32-bit handler
5045 * - 32-bit TSS, 16-bit handler */
5046 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5047 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5048 else
5049 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5050
5051 if (fEfl & X86_EFL_VM)
5052 {
5053 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5054 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5055 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5056 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5057 }
5058 }
5059 /*
5060 * Same privilege, no stack change and smaller stack frame.
5061 */
5062 else
5063 {
5064 uint64_t uNewRsp;
5065 RTPTRUNION uStackFrame;
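        /* Same-privilege frame: EIP/IP, CS, EFLAGS (3 entries, +1 for an error
           code), each entry being 2 or 4 bytes depending on the gate size. */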
5066 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5067 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5068 if (rcStrict != VINF_SUCCESS)
5069 return rcStrict;
5070 void * const pvStackFrame = uStackFrame.pv;
5071
5072 if (f32BitGate)
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu32++ = uErr;
5076 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu32[2] = fEfl;
5079 }
5080 else
5081 {
5082 if (fFlags & IEM_XCPT_FLAGS_ERR)
5083 *uStackFrame.pu16++ = uErr;
5084 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5085 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5086 uStackFrame.pu16[2] = fEfl;
5087 }
5088 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5089 if (rcStrict != VINF_SUCCESS)
5090 return rcStrict;
5091
5092 /* Mark the CS selector as 'accessed'. */
5093 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5094 {
5095 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5096 if (rcStrict != VINF_SUCCESS)
5097 return rcStrict;
5098 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5099 }
5100
5101 /*
5102 * Start committing the register changes (joins with the other branch).
5103 */
5104 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5105 }
5106
5107 /* ... register committing continues. */
5108 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5109 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5110 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5111 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5112 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5113 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5114
5115 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5116 fEfl &= ~fEflToClear;
5117 IEMMISC_SET_EFL(pVCpu, fEfl);
5118
5119 if (fFlags & IEM_XCPT_FLAGS_CR2)
5120 pVCpu->cpum.GstCtx.cr2 = uCr2;
5121
5122 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5123 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5124
5125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5126}
5127
5128
5129/**
5130 * Implements exceptions and interrupts for long mode.
5131 *
5132 * @returns VBox strict status code.
5133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5134 * @param cbInstr The number of bytes to offset rIP by in the return
5135 * address.
5136 * @param u8Vector The interrupt / exception vector number.
5137 * @param fFlags The flags.
5138 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5139 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5140 */
5141IEM_STATIC VBOXSTRICTRC
5142iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5143 uint8_t cbInstr,
5144 uint8_t u8Vector,
5145 uint32_t fFlags,
5146 uint16_t uErr,
5147 uint64_t uCr2)
5148{
5149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5150
5151 /*
5152 * Read the IDT entry.
5153 */
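    /* Long-mode IDT entries are 16 bytes each, hence the shift by 4. */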
5154 uint16_t offIdt = (uint16_t)u8Vector << 4;
5155 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5156 {
5157 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5159 }
5160 X86DESC64 Idte;
5161#ifdef _MSC_VER /* Shut up silly compiler warning. */
5162 Idte.au64[0] = 0;
5163 Idte.au64[1] = 0;
5164#endif
5165 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5166 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5167 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5168 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5169 {
5170 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5171 return rcStrict;
5172 }
5173 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5174 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5175 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5176
5177 /*
5178 * Check the descriptor type, DPL and such.
5179 * ASSUMES this is done in the same order as described for call-gate calls.
5180 */
5181 if (Idte.Gate.u1DescType)
5182 {
5183 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5184 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5185 }
5186 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5187 switch (Idte.Gate.u4Type)
5188 {
5189 case AMD64_SEL_TYPE_SYS_INT_GATE:
5190 fEflToClear |= X86_EFL_IF;
5191 break;
5192 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5193 break;
5194
5195 default:
5196 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5197 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5198 }
5199
5200 /* Check DPL against CPL if applicable. */
5201 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5202 {
5203 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5206 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5207 }
5208 }
5209
5210 /* Is it there? */
5211 if (!Idte.Gate.u1Present)
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5214 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5215 }
5216
5217 /* A null CS is bad. */
5218 RTSEL NewCS = Idte.Gate.u16Sel;
5219 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5220 {
5221 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5222 return iemRaiseGeneralProtectionFault0(pVCpu);
5223 }
5224
5225 /* Fetch the descriptor for the new CS. */
5226 IEMSELDESC DescCS;
5227 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5228 if (rcStrict != VINF_SUCCESS)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5231 return rcStrict;
5232 }
5233
5234 /* Must be a 64-bit code segment. */
5235 if (!DescCS.Long.Gen.u1DescType)
5236 {
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5238 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5239 }
5240 if ( !DescCS.Long.Gen.u1Long
5241 || DescCS.Long.Gen.u1DefBig
5242 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5245 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5246 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5247 }
5248
5249 /* Don't allow lowering the privilege level. For non-conforming CS
5250 selectors, the CS.DPL sets the privilege level the trap/interrupt
5251 handler runs at. For conforming CS selectors, the CPL remains
5252 unchanged, but the CS.DPL must be <= CPL. */
5253 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5254 * when CPU in Ring-0. Result \#GP? */
5255 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5258 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5259 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5260 }
5261
5262
5263 /* Make sure the selector is present. */
5264 if (!DescCS.Legacy.Gen.u1Present)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5267 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5268 }
5269
5270 /* Check that the new RIP is canonical. */
5271 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5272 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5273 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5274 if (!IEM_IS_CANONICAL(uNewRip))
5275 {
5276 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5277 return iemRaiseGeneralProtectionFault0(pVCpu);
5278 }
5279
5280 /*
5281 * If the privilege level changes or if the IST isn't zero, we need to get
5282 * a new stack from the TSS.
5283 */
5284 uint64_t uNewRsp;
5285 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5286 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5287 if ( uNewCpl != pVCpu->iem.s.uCpl
5288 || Idte.Gate.u3IST != 0)
5289 {
5290 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5291 if (rcStrict != VINF_SUCCESS)
5292 return rcStrict;
5293 }
5294 else
5295 uNewRsp = pVCpu->cpum.GstCtx.rsp;
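    /* In long mode the CPU aligns the new RSP down to a 16-byte boundary before
       pushing the stack frame. */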
5296 uNewRsp &= ~(uint64_t)0xf;
5297
5298 /*
5299 * Calc the flag image to push.
5300 */
5301 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5302 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5303 fEfl &= ~X86_EFL_RF;
5304 else
5305 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5306
5307 /*
5308 * Start making changes.
5309 */
5310 /* Set the new CPL so that stack accesses use it. */
5311 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5312 pVCpu->iem.s.uCpl = uNewCpl;
5313
5314 /* Create the stack frame. */
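    /* Five 64-bit entries are always pushed (SS, RSP, RFLAGS, CS, RIP), plus one
       more for the error code when IEM_XCPT_FLAGS_ERR is set. */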
5315 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5316 RTPTRUNION uStackFrame;
5317 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5318 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321 void * const pvStackFrame = uStackFrame.pv;
5322
5323 if (fFlags & IEM_XCPT_FLAGS_ERR)
5324 *uStackFrame.pu64++ = uErr;
5325 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5326 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5327 uStackFrame.pu64[2] = fEfl;
5328 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5329 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5330 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5331 if (rcStrict != VINF_SUCCESS)
5332 return rcStrict;
5333
5334    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5335    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5336 * after pushing the stack frame? (Write protect the gdt + stack to
5337 * find out.) */
5338 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5339 {
5340 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5341 if (rcStrict != VINF_SUCCESS)
5342 return rcStrict;
5343 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5344 }
5345
5346 /*
5347     * Start committing the register changes.
5348 */
5349 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5350 * hidden registers when interrupting 32-bit or 16-bit code! */
5351 if (uNewCpl != uOldCpl)
5352 {
5353 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5354 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5355 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5356 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5357 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5358 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5359 }
5360 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5361 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5362 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5363 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5364 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5365 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5366 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5367 pVCpu->cpum.GstCtx.rip = uNewRip;
5368
5369 fEfl &= ~fEflToClear;
5370 IEMMISC_SET_EFL(pVCpu, fEfl);
5371
5372 if (fFlags & IEM_XCPT_FLAGS_CR2)
5373 pVCpu->cpum.GstCtx.cr2 = uCr2;
5374
5375 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5376 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5377
5378 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5379}
5380
5381
5382/**
5383 * Implements exceptions and interrupts.
5384 *
5385 * All exceptions and interrupts go through this function!
5386 *
5387 * @returns VBox strict status code.
5388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5389 * @param cbInstr The number of bytes to offset rIP by in the return
5390 * address.
5391 * @param u8Vector The interrupt / exception vector number.
5392 * @param fFlags The flags.
5393 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5394 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5395 */
5396DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5397iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5398 uint8_t cbInstr,
5399 uint8_t u8Vector,
5400 uint32_t fFlags,
5401 uint16_t uErr,
5402 uint64_t uCr2)
5403{
5404 /*
5405 * Get all the state that we might need here.
5406 */
5407 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5408 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5409
5410#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5411 /*
5412 * Flush prefetch buffer
5413 */
5414 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5415#endif
5416
5417 /*
5418 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5419 */
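    /* A software INT n executed in V8086 mode with IOPL < 3 is converted into a
       #GP(0) here instead of being dispatched through the IDT; INT3, INTO and
       ICEBP are exempt from this check. */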
5420 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5421 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5422 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5423 | IEM_XCPT_FLAGS_BP_INSTR
5424 | IEM_XCPT_FLAGS_ICEBP_INSTR
5425 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5426 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5427 {
5428 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5429 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5430 u8Vector = X86_XCPT_GP;
5431 uErr = 0;
5432 }
5433#ifdef DBGFTRACE_ENABLED
5434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5435 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5436 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5437#endif
5438
5439 /*
5440 * Evaluate whether NMI blocking should be in effect.
5441 * Normally, NMI blocking is in effect whenever we inject an NMI.
5442 */
5443 bool fBlockNmi;
5444 if ( u8Vector == X86_XCPT_NMI
5445 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5446 fBlockNmi = true;
5447 else
5448 fBlockNmi = false;
5449
5450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5451 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5452 {
5453 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5454 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5455 return rcStrict0;
5456
5457 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5458 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5459 {
5460 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5461 fBlockNmi = false;
5462 }
5463 }
5464#endif
5465
5466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5467 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5468 {
5469 /*
5470 * If the event is being injected as part of VMRUN, it isn't subject to event
5471 * intercepts in the nested-guest. However, secondary exceptions that occur
5472 * during injection of any event -are- subject to exception intercepts.
5473 *
5474 * See AMD spec. 15.20 "Event Injection".
5475 */
5476 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5477 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5478 else
5479 {
5480 /*
5481 * Check and handle if the event being raised is intercepted.
5482 */
5483 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5484 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5485 return rcStrict0;
5486 }
5487 }
5488#endif
5489
5490 /*
5491 * Set NMI blocking if necessary.
5492 */
5493 if ( fBlockNmi
5494 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5495 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5496
5497 /*
5498 * Do recursion accounting.
5499 */
5500 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5501 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5502 if (pVCpu->iem.s.cXcptRecursions == 0)
5503 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5504 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5505 else
5506 {
5507 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5508 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5509 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5510
5511 if (pVCpu->iem.s.cXcptRecursions >= 4)
5512 {
5513#ifdef DEBUG_bird
5514 AssertFailed();
5515#endif
5516 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5517 }
5518
5519 /*
5520 * Evaluate the sequence of recurring events.
5521 */
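        /* Possible verdicts: keep raising the current event, promote it to #DF,
           escalate to a triple fault (CPU shutdown), or flag a CPU hang. */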
5522 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5523 NULL /* pXcptRaiseInfo */);
5524 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5525 { /* likely */ }
5526 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5527 {
5528 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5529 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5530 u8Vector = X86_XCPT_DF;
5531 uErr = 0;
5532#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5533 /* VMX nested-guest #DF intercept needs to be checked here. */
5534 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5535 {
5536 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5537 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5538 return rcStrict0;
5539 }
5540#endif
5541 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5542 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5543 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5544 }
5545 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5546 {
5547 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5548 return iemInitiateCpuShutdown(pVCpu);
5549 }
5550 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5551 {
5552 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5553 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5554 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5555 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5556 return VERR_EM_GUEST_CPU_HANG;
5557 }
5558 else
5559 {
5560 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5561 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5562 return VERR_IEM_IPE_9;
5563 }
5564
5565 /*
5566         * The 'EXT' bit is set when an exception occurs during delivery of an external
5567         * event (such as an interrupt or earlier exception)[1]. A privileged software
5568         * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5569         * software interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5570 *
5571 * [1] - Intel spec. 6.13 "Error Code"
5572 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5573 * [3] - Intel Instruction reference for INT n.
5574 */
5575 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5576 && (fFlags & IEM_XCPT_FLAGS_ERR)
5577 && u8Vector != X86_XCPT_PF
5578 && u8Vector != X86_XCPT_DF)
5579 {
5580 uErr |= X86_TRAP_ERR_EXTERNAL;
5581 }
5582 }
5583
5584 pVCpu->iem.s.cXcptRecursions++;
5585 pVCpu->iem.s.uCurXcpt = u8Vector;
5586 pVCpu->iem.s.fCurXcpt = fFlags;
5587 pVCpu->iem.s.uCurXcptErr = uErr;
5588 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5589
5590 /*
5591 * Extensive logging.
5592 */
5593#if defined(LOG_ENABLED) && defined(IN_RING3)
5594 if (LogIs3Enabled())
5595 {
5596 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5597 PVM pVM = pVCpu->CTX_SUFF(pVM);
5598 char szRegs[4096];
5599 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5600 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5601 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5602 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5603 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5604 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5605 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5606 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5607 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5608 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5609 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5610 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5611 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5612 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5613 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5614 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5615 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5616 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5617 " efer=%016VR{efer}\n"
5618 " pat=%016VR{pat}\n"
5619 " sf_mask=%016VR{sf_mask}\n"
5620 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5621 " lstar=%016VR{lstar}\n"
5622 " star=%016VR{star} cstar=%016VR{cstar}\n"
5623 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5624 );
5625
5626 char szInstr[256];
5627 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5628 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5629 szInstr, sizeof(szInstr), NULL);
5630 Log3(("%s%s\n", szRegs, szInstr));
5631 }
5632#endif /* LOG_ENABLED */
5633
5634 /*
5635 * Call the mode specific worker function.
5636 */
5637 VBOXSTRICTRC rcStrict;
5638 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5639 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5640 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5641 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5642 else
5643 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5644
5645 /* Flush the prefetch buffer. */
5646#ifdef IEM_WITH_CODE_TLB
5647 pVCpu->iem.s.pbInstrBuf = NULL;
5648#else
5649 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5650#endif
5651
5652 /*
5653 * Unwind.
5654 */
5655 pVCpu->iem.s.cXcptRecursions--;
5656 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5657 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5658 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5659 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5660 pVCpu->iem.s.cXcptRecursions + 1));
5661 return rcStrict;
5662}
5663
5664#ifdef IEM_WITH_SETJMP
5665/**
5666 * See iemRaiseXcptOrInt. Will not return.
5667 */
5668IEM_STATIC DECL_NO_RETURN(void)
5669iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5670 uint8_t cbInstr,
5671 uint8_t u8Vector,
5672 uint32_t fFlags,
5673 uint16_t uErr,
5674 uint64_t uCr2)
5675{
5676 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5677 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5678}
5679#endif
5680
5681
5682/** \#DE - 00. */
5683DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5684{
5685 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5686}
5687
5688
5689/** \#DB - 01.
5690 * @note This automatically clears DR7.GD. */
5691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5692{
5693 /** @todo set/clear RF. */
5694 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/** \#BR - 05. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5703}
5704
5705
5706/** \#UD - 06. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5710}
5711
5712
5713/** \#NM - 07. */
5714DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5715{
5716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5717}
5718
5719
5720/** \#TS(err) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5724}
5725
5726
5727/** \#TS(tr) - 0a. */
5728DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5729{
5730 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5731 pVCpu->cpum.GstCtx.tr.Sel, 0);
5732}
5733
5734
5735/** \#TS(0) - 0a. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5739 0, 0);
5740}
5741
5742
5743/** \#TS(sel) - 0a. */
5744DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5745{
5746 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5747 uSel & X86_SEL_MASK_OFF_RPL, 0);
5748}
5749
5750
5751/** \#NP(err) - 0b. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5755}
5756
5757
5758/** \#NP(sel) - 0b. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5760{
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5762 uSel & ~X86_SEL_RPL, 0);
5763}
5764
5765
5766/** \#SS(seg) - 0c. */
5767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5768{
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5770 uSel & ~X86_SEL_RPL, 0);
5771}
5772
5773
5774/** \#SS(err) - 0c. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#GP(n) - 0d. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5785}
5786
5787
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794#ifdef IEM_WITH_SETJMP
5795/** \#GP(0) - 0d. */
5796DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5797{
5798 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5799}
5800#endif
5801
5802
5803/** \#GP(sel) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5807 Sel & ~X86_SEL_RPL, 0);
5808}
5809
5810
5811/** \#GP(0) - 0d. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816
5817
5818/** \#GP(sel) - 0d. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5820{
5821 NOREF(iSegReg); NOREF(fAccess);
5822 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5823 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825
5826#ifdef IEM_WITH_SETJMP
5827/** \#GP(sel) - 0d, longjmp. */
5828DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5829{
5830 NOREF(iSegReg); NOREF(fAccess);
5831 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5832 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5833}
5834#endif
5835
5836/** \#GP(sel) - 0d. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5838{
5839 NOREF(Sel);
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5841}
5842
5843#ifdef IEM_WITH_SETJMP
5844/** \#GP(sel) - 0d, longjmp. */
5845DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5846{
5847 NOREF(Sel);
5848 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5849}
5850#endif
5851
5852
5853/** \#GP(sel) - 0d. */
5854DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5855{
5856 NOREF(iSegReg); NOREF(fAccess);
5857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5858}
5859
5860#ifdef IEM_WITH_SETJMP
5861/** \#GP(sel) - 0d, longjmp. */
5862DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5863 uint32_t fAccess)
5864{
5865 NOREF(iSegReg); NOREF(fAccess);
5866 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5867}
5868#endif
5869
5870
5871/** \#PF(n) - 0e. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5873{
5874 uint16_t uErr;
5875 switch (rc)
5876 {
5877 case VERR_PAGE_NOT_PRESENT:
5878 case VERR_PAGE_TABLE_NOT_PRESENT:
5879 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5880 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5881 uErr = 0;
5882 break;
5883
5884 default:
5885 AssertMsgFailed(("%Rrc\n", rc));
5886 RT_FALL_THRU();
5887 case VERR_ACCESS_DENIED:
5888 uErr = X86_TRAP_PF_P;
5889 break;
5890
5891 /** @todo reserved */
5892 }
5893
5894 if (pVCpu->iem.s.uCpl == 3)
5895 uErr |= X86_TRAP_PF_US;
5896
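    /* The instruction-fetch (I/D) bit is only reported for code fetches when
       no-execute paging can be in effect, i.e. PAE/long-mode paging with
       EFER.NXE set. */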
5897 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5898 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5899 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5900 uErr |= X86_TRAP_PF_ID;
5901
5902#if 0 /* This is so much non-sense, really. Why was it done like that? */
5903 /* Note! RW access callers reporting a WRITE protection fault, will clear
5904 the READ flag before calling. So, read-modify-write accesses (RW)
5905 can safely be reported as READ faults. */
5906 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5907 uErr |= X86_TRAP_PF_RW;
5908#else
5909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5910 {
5911 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5912 /// (regardless of outcome of the comparison in the latter case).
5913 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5914 uErr |= X86_TRAP_PF_RW;
5915 }
5916#endif
5917
5918 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5919 uErr, GCPtrWhere);
5920}
5921
5922#ifdef IEM_WITH_SETJMP
5923/** \#PF(n) - 0e, longjmp. */
5924IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5925{
5926 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5927}
5928#endif
5929
5930
5931/** \#MF(0) - 10. */
5932DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5933{
5934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5935}
5936
5937
5938/** \#AC(0) - 11. */
5939DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5940{
5941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5942}
5943
5944
5945/**
5946 * Macro for calling iemCImplRaiseDivideError().
5947 *
5948 * This enables us to add/remove arguments and force different levels of
5949 * inlining as we wish.
5950 *
5951 * @return Strict VBox status code.
5952 */
5953#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5954IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5955{
5956 NOREF(cbInstr);
5957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5958}
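/* Illustrative usage (not taken from this file): an opcode decoder that needs to
   raise #DE simply does "return IEMOP_RAISE_DIVIDE_ERROR();", deferring the work
   to the C implementation above via IEM_MC_DEFER_TO_CIMPL_0. */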
5959
5960
5961/**
5962 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5963 *
5964 * This enables us to add/remove arguments and force different levels of
5965 * inlining as we wish.
5966 *
5967 * @return Strict VBox status code.
5968 */
5969#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5970IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5971{
5972 NOREF(cbInstr);
5973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5974}
5975
5976
5977/**
5978 * Macro for calling iemCImplRaiseInvalidOpcode().
5979 *
5980 * This enables us to add/remove arguments and force different levels of
5981 * inlining as we wish.
5982 *
5983 * @return Strict VBox status code.
5984 */
5985#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5986IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5987{
5988 NOREF(cbInstr);
5989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5990}
5991
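
/*
 * Rough usage sketch (the decoder name below is made up for illustration):
 * an opcode decoder that wants to raise #UD simply defers to the C
 * implementation above via the macro, mirroring what the UD stub macros
 * further down do:
 *
 *      FNIEMOP_DEF(iemOp_SomeInvalidEncoding)
 *      {
 *          return IEMOP_RAISE_INVALID_OPCODE();  // -> IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
 *      }
 */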
5992
5993/** @} */
5994
5995
5996/*
5997 *
5998 * Helper routines.
5999 * Helper routines.
6000 * Helper routines.
6001 *
6002 */
6003
6004/**
6005 * Recalculates the effective operand size.
6006 *
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 */
6009IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6010{
6011 switch (pVCpu->iem.s.enmCpuMode)
6012 {
6013 case IEMMODE_16BIT:
6014 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6015 break;
6016 case IEMMODE_32BIT:
6017 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6018 break;
6019 case IEMMODE_64BIT:
6020 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6021 {
6022 case 0:
6023 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6024 break;
6025 case IEM_OP_PRF_SIZE_OP:
6026 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6027 break;
6028 case IEM_OP_PRF_SIZE_REX_W:
6029 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6030 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6031 break;
6032 }
6033 break;
6034 default:
6035 AssertFailed();
6036 }
6037}
6038
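
/*
 * Worked example (illustrative only): with enmCpuMode = IEMMODE_32BIT and an
 * operand-size prefix (0x66) recorded in fPrefixes, the switch above yields
 * enmEffOpSize = IEMMODE_16BIT.  In 64-bit mode REX.W wins over 0x66:
 *
 *      pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP;
 *      iemRecalEffOpSize(pVCpu);
 *      Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT);  // REX.W takes precedence
 */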
6039
6040/**
6041 * Sets the default operand size to 64-bit and recalculates the effective
6042 * operand size.
6043 *
6044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6045 */
6046IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6047{
6048 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6049 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6050 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6051 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6052 else
6053 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6054}
6055
6056
6057/*
6058 *
6059 * Common opcode decoders.
6060 * Common opcode decoders.
6061 * Common opcode decoders.
6062 *
6063 */
6064//#include <iprt/mem.h>
6065
6066/**
6067 * Used to add extra details about a stub case.
6068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6069 */
6070IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6071{
6072#if defined(LOG_ENABLED) && defined(IN_RING3)
6073 PVM pVM = pVCpu->CTX_SUFF(pVM);
6074 char szRegs[4096];
6075 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6076 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6077 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6078 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6079 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6080 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6081 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6082 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6083 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6084 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6085 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6086 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6087 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6088 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6089 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6090 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6091 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6092 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6093 " efer=%016VR{efer}\n"
6094 " pat=%016VR{pat}\n"
6095 " sf_mask=%016VR{sf_mask}\n"
6096 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6097 " lstar=%016VR{lstar}\n"
6098 " star=%016VR{star} cstar=%016VR{cstar}\n"
6099 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6100 );
6101
6102 char szInstr[256];
6103 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6104 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6105 szInstr, sizeof(szInstr), NULL);
6106
6107 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6108#else
6109    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6110#endif
6111}
6112
6113/**
6114 * Complains about a stub.
6115 *
6116 * Two versions of this macro are provided: one for everyday use and one for
6117 * use when working on IEM.
6118 */
6119#if 0
6120# define IEMOP_BITCH_ABOUT_STUB() \
6121 do { \
6122 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6123 iemOpStubMsg2(pVCpu); \
6124 RTAssertPanic(); \
6125 } while (0)
6126#else
6127# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6128#endif
6129
6130/** Stubs an opcode. */
6131#define FNIEMOP_STUB(a_Name) \
6132 FNIEMOP_DEF(a_Name) \
6133 { \
6134 RT_NOREF_PV(pVCpu); \
6135 IEMOP_BITCH_ABOUT_STUB(); \
6136 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6137 } \
6138 typedef int ignore_semicolon
6139
6140/** Stubs an opcode. */
6141#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6142 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6143 { \
6144 RT_NOREF_PV(pVCpu); \
6145 RT_NOREF_PV(a_Name0); \
6146 IEMOP_BITCH_ABOUT_STUB(); \
6147 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6148 } \
6149 typedef int ignore_semicolon
6150
6151/** Stubs an opcode which currently should raise \#UD. */
6152#define FNIEMOP_UD_STUB(a_Name) \
6153 FNIEMOP_DEF(a_Name) \
6154 { \
6155 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6156 return IEMOP_RAISE_INVALID_OPCODE(); \
6157 } \
6158 typedef int ignore_semicolon
6159
6160/** Stubs an opcode which currently should raise \#UD. */
6161#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6162 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6163 { \
6164 RT_NOREF_PV(pVCpu); \
6165 RT_NOREF_PV(a_Name0); \
6166 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6167 return IEMOP_RAISE_INVALID_OPCODE(); \
6168 } \
6169 typedef int ignore_semicolon
6170
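
/*
 * Usage sketch (opcode names invented for illustration): stubbing an
 * unimplemented opcode is a single line; the trailing typedef in the macro
 * merely swallows the semicolon at the use site:
 *
 *      FNIEMOP_STUB(iemOp_hypothetical_unimplemented);   // returns VERR_IEM_INSTR_NOT_IMPLEMENTED
 *      FNIEMOP_UD_STUB(iemOp_hypothetical_reserved);     // raises #UD instead
 */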
6171
6172
6173/** @name Register Access.
6174 * @{
6175 */
6176
6177/**
6178 * Gets a reference (pointer) to the specified hidden segment register.
6179 *
6180 * @returns Hidden register reference.
6181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6182 * @param iSegReg The segment register.
6183 */
6184IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6185{
6186 Assert(iSegReg < X86_SREG_COUNT);
6187 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6188 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6189
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 return pSReg;
6192}
6193
6194
6195/**
6196 * Ensures that the given hidden segment register is up to date.
6197 *
6198 * @returns Hidden register reference.
6199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6200 * @param pSReg The segment register.
6201 */
6202IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6203{
6204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6205 NOREF(pVCpu);
6206 return pSReg;
6207}
6208
6209
6210/**
6211 * Gets a reference (pointer) to the specified segment register (the selector
6212 * value).
6213 *
6214 * @returns Pointer to the selector variable.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iSegReg The segment register.
6217 */
6218DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6219{
6220 Assert(iSegReg < X86_SREG_COUNT);
6221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6222 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6223}
6224
6225
6226/**
6227 * Fetches the selector value of a segment register.
6228 *
6229 * @returns The selector value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iSegReg The segment register.
6232 */
6233DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6234{
6235 Assert(iSegReg < X86_SREG_COUNT);
6236 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6237 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6238}
6239
6240
6241/**
6242 * Fetches the base address value of a segment register.
6243 *
6244 * @returns The segment base address.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iSegReg The segment register.
6247 */
6248DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6249{
6250 Assert(iSegReg < X86_SREG_COUNT);
6251 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6252 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6253}
6254
6255
6256/**
6257 * Gets a reference (pointer) to the specified general purpose register.
6258 *
6259 * @returns Register reference.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param iReg The general purpose register.
6262 */
6263DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6264{
6265 Assert(iReg < 16);
6266 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6267}
6268
6269
6270/**
6271 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6272 *
6273 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6274 *
6275 * @returns Register reference.
6276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6277 * @param iReg The register.
6278 */
6279DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6280{
6281 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6282 {
6283 Assert(iReg < 16);
6284 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6285 }
6286 /* high 8-bit register. */
6287 Assert(iReg < 8);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6289}
6290
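
/*
 * Worked example (illustrative only): for iReg = X86_GREG_xSP (4) the result
 * of the helper above depends on whether any REX prefix was seen, matching
 * real 8-bit register encoding:
 *
 *      // Without REX: encoding 4 means AH, i.e. the high byte of RAX.
 *      uint8_t *pbAh  = iemGRegRefU8(pVCpu, 4);   // == &aGRegs[X86_GREG_xAX].bHi
 *
 *      // With any REX prefix set in fPrefixes: encoding 4 means SPL.
 *      uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);   // == &aGRegs[X86_GREG_xSP].u8
 */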
6291
6292/**
6293 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6308 *
6309 * @returns Register reference.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6314{
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6322 *
6323 * @returns Register reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iReg The register.
6326 */
6327DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6328{
6329    Assert(iReg < 16);
6330 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6331}
6332
6333
6334/**
6335 * Gets a reference (pointer) to the specified segment register's base address.
6336 *
6337 * @returns Segment register base address reference.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 * @param iSegReg The segment selector.
6340 */
6341DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6342{
6343 Assert(iSegReg < X86_SREG_COUNT);
6344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6345 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6346}
6347
6348
6349/**
6350 * Fetches the value of an 8-bit general purpose register.
6351 *
6352 * @returns The register value.
6353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6354 * @param iReg The register.
6355 */
6356DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6357{
6358 return *iemGRegRefU8(pVCpu, iReg);
6359}
6360
6361
6362/**
6363 * Fetches the value of a 16-bit general purpose register.
6364 *
6365 * @returns The register value.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6373}
6374
6375
6376/**
6377 * Fetches the value of a 32-bit general purpose register.
6378 *
6379 * @returns The register value.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6387}
6388
6389
6390/**
6391 * Fetches the value of a 64-bit general purpose register.
6392 *
6393 * @returns The register value.
6394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6395 * @param iReg The register.
6396 */
6397DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6398{
6399 Assert(iReg < 16);
6400 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6401}
6402
6403
6404/**
6405 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6406 *
6407 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6408 * segment limit.
6409 *
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 * @param offNextInstr The offset of the next instruction.
6412 */
6413IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6414{
6415 switch (pVCpu->iem.s.enmEffOpSize)
6416 {
6417 case IEMMODE_16BIT:
6418 {
6419 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6420 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6421 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6422 return iemRaiseGeneralProtectionFault0(pVCpu);
6423 pVCpu->cpum.GstCtx.rip = uNewIp;
6424 break;
6425 }
6426
6427 case IEMMODE_32BIT:
6428 {
6429 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6430 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6431
6432 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6433 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6434 return iemRaiseGeneralProtectionFault0(pVCpu);
6435 pVCpu->cpum.GstCtx.rip = uNewEip;
6436 break;
6437 }
6438
6439 case IEMMODE_64BIT:
6440 {
6441 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6442
6443 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6444 if (!IEM_IS_CANONICAL(uNewRip))
6445 return iemRaiseGeneralProtectionFault0(pVCpu);
6446 pVCpu->cpum.GstCtx.rip = uNewRip;
6447 break;
6448 }
6449
6450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6451 }
6452
6453 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6454
6455#ifndef IEM_WITH_CODE_TLB
6456 /* Flush the prefetch buffer. */
6457 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6458#endif
6459
6460 return VINF_SUCCESS;
6461}
6462
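
/*
 * Worked example (illustrative only): in the 16-bit case above uNewIp is
 * computed in a uint16_t, so the instruction pointer wraps at 64K and only
 * the CS limit check can reject the jump.  E.g. with IP=0xfff0, a 2 byte
 * "jmp short" and offNextInstr=+0x20: uNewIp = 0xfff0 + 0x20 + 2 = 0x0012.
 */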
6463
6464/**
6465 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6466 *
6467 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6468 * segment limit.
6469 *
6470 * @returns Strict VBox status code.
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 * @param offNextInstr The offset of the next instruction.
6473 */
6474IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6475{
6476 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6477
6478 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6479 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6480 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6481 return iemRaiseGeneralProtectionFault0(pVCpu);
6482    /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6483 pVCpu->cpum.GstCtx.rip = uNewIp;
6484 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6485
6486#ifndef IEM_WITH_CODE_TLB
6487 /* Flush the prefetch buffer. */
6488 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6489#endif
6490
6491 return VINF_SUCCESS;
6492}
6493
6494
6495/**
6496 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6497 *
6498 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6499 * segment limit.
6500 *
6501 * @returns Strict VBox status code.
6502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6503 * @param offNextInstr The offset of the next instruction.
6504 */
6505IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6506{
6507 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6508
6509 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6510 {
6511 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6512
6513 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6514 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pVCpu->cpum.GstCtx.rip = uNewEip;
6517 }
6518 else
6519 {
6520 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6521
6522 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6523 if (!IEM_IS_CANONICAL(uNewRip))
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 pVCpu->cpum.GstCtx.rip = uNewRip;
6526 }
6527 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6528
6529#ifndef IEM_WITH_CODE_TLB
6530 /* Flush the prefetch buffer. */
6531 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6532#endif
6533
6534 return VINF_SUCCESS;
6535}
6536
6537
6538/**
6539 * Performs a near jump to the specified address.
6540 *
6541 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6542 * segment limit.
6543 *
6544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6545 * @param uNewRip The new RIP value.
6546 */
6547IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6548{
6549 switch (pVCpu->iem.s.enmEffOpSize)
6550 {
6551 case IEMMODE_16BIT:
6552 {
6553 Assert(uNewRip <= UINT16_MAX);
6554 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6555 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6556 return iemRaiseGeneralProtectionFault0(pVCpu);
6557 /** @todo Test 16-bit jump in 64-bit mode. */
6558 pVCpu->cpum.GstCtx.rip = uNewRip;
6559 break;
6560 }
6561
6562 case IEMMODE_32BIT:
6563 {
6564 Assert(uNewRip <= UINT32_MAX);
6565 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6566 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6567
6568 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6569 return iemRaiseGeneralProtectionFault0(pVCpu);
6570 pVCpu->cpum.GstCtx.rip = uNewRip;
6571 break;
6572 }
6573
6574 case IEMMODE_64BIT:
6575 {
6576 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6577
6578 if (!IEM_IS_CANONICAL(uNewRip))
6579 return iemRaiseGeneralProtectionFault0(pVCpu);
6580 pVCpu->cpum.GstCtx.rip = uNewRip;
6581 break;
6582 }
6583
6584 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6585 }
6586
6587 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6588
6589#ifndef IEM_WITH_CODE_TLB
6590 /* Flush the prefetch buffer. */
6591 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6592#endif
6593
6594 return VINF_SUCCESS;
6595}
6596
6597
6598/**
6599 * Gets the address of the top of the stack.
6600 *
6601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6602 */
6603DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6604{
6605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6606 return pVCpu->cpum.GstCtx.rsp;
6607 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6608 return pVCpu->cpum.GstCtx.esp;
6609 return pVCpu->cpum.GstCtx.sp;
6610}
6611
6612
6613/**
6614 * Updates the RIP/EIP/IP to point to the next instruction.
6615 *
6616 * This function leaves the EFLAGS.RF flag alone.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param cbInstr The number of bytes to add.
6620 */
6621IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6622{
6623 switch (pVCpu->iem.s.enmCpuMode)
6624 {
6625 case IEMMODE_16BIT:
6626 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6627 pVCpu->cpum.GstCtx.eip += cbInstr;
6628 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6629 break;
6630
6631 case IEMMODE_32BIT:
6632 pVCpu->cpum.GstCtx.eip += cbInstr;
6633 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6634 break;
6635
6636 case IEMMODE_64BIT:
6637 pVCpu->cpum.GstCtx.rip += cbInstr;
6638 break;
6639 default: AssertFailed();
6640 }
6641}
6642
6643
6644#if 0
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 */
6650IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6651{
6652 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6653}
6654#endif
6655
6656
6657
6658/**
6659 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param cbInstr The number of bytes to add.
6663 */
6664IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6665{
6666 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6667
6668 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6669#if ARCH_BITS >= 64
6670 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6671 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6672 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6673#else
6674 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6675 pVCpu->cpum.GstCtx.rip += cbInstr;
6676 else
6677 pVCpu->cpum.GstCtx.eip += cbInstr;
6678#endif
6679}
6680
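
/*
 * Illustrative note on the table trick above: s_aRipMasks is indexed by
 * IEMMODE, so 16-bit and 32-bit modes both truncate the advanced RIP to
 * 32 bits while 64-bit mode keeps the full value:
 *
 *      //  IEMMODE_16BIT -> mask 0x00000000ffffffff
 *      //  IEMMODE_32BIT -> mask 0x00000000ffffffff
 *      //  IEMMODE_64BIT -> mask 0xffffffffffffffff
 */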
6681
6682/**
6683 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 */
6687IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6688{
6689 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6690}
6691
6692
6693/**
6694 * Adds to the stack pointer.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param cbToAdd The number of bytes to add (8-bit!).
6698 */
6699DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6700{
6701 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6702 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6703 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6704 pVCpu->cpum.GstCtx.esp += cbToAdd;
6705 else
6706 pVCpu->cpum.GstCtx.sp += cbToAdd;
6707}
6708
6709
6710/**
6711 * Subtracts from the stack pointer.
6712 *
6713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6714 * @param cbToSub The number of bytes to subtract (8-bit!).
6715 */
6716DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6717{
6718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6719 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6720 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6721 pVCpu->cpum.GstCtx.esp -= cbToSub;
6722 else
6723 pVCpu->cpum.GstCtx.sp -= cbToSub;
6724}
6725
6726
6727/**
6728 * Adds to the temporary stack pointer.
6729 *
6730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6731 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6732 * @param cbToAdd The number of bytes to add (16-bit).
6733 */
6734DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6735{
6736 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6737 pTmpRsp->u += cbToAdd;
6738 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6739 pTmpRsp->DWords.dw0 += cbToAdd;
6740 else
6741 pTmpRsp->Words.w0 += cbToAdd;
6742}
6743
6744
6745/**
6746 * Subtracts from the temporary stack pointer.
6747 *
6748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6749 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6750 * @param cbToSub The number of bytes to subtract.
6751 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter
6752 *          relies on that.
6753 */
6754DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6755{
6756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6757 pTmpRsp->u -= cbToSub;
6758 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6759 pTmpRsp->DWords.dw0 -= cbToSub;
6760 else
6761 pTmpRsp->Words.w0 -= cbToSub;
6762}
6763
6764
6765/**
6766 * Calculates the effective stack address for a push of the specified size as
6767 * well as the new RSP value (upper bits may be masked).
6768 *
6769 * @returns Effective stack address for the push.
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param   cbItem              The size of the stack item to push.
6772 * @param puNewRsp Where to return the new RSP value.
6773 */
6774DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6775{
6776 RTUINT64U uTmpRsp;
6777 RTGCPTR GCPtrTop;
6778 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6779
6780 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6781 GCPtrTop = uTmpRsp.u -= cbItem;
6782 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6783 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6784 else
6785 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6786 *puNewRsp = uTmpRsp.u;
6787 return GCPtrTop;
6788}
6789
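
/*
 * Worked example (illustrative only): with a 32-bit stack (SS.u1DefBig set),
 * ESP=0x00001000 and a 4 byte push, the code above returns
 * GCPtrTop=0x00000ffc and sets the low 32 bits of *puNewRsp to 0x00000ffc;
 * the high half of the original RSP is carried over unchanged in the
 * temporary union.
 */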
6790
6791/**
6792 * Gets the current stack pointer and calculates the value after a pop of the
6793 * specified size.
6794 *
6795 * @returns Current stack pointer.
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 * @param cbItem The size of the stack item to pop.
6798 * @param puNewRsp Where to return the new RSP value.
6799 */
6800DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6801{
6802 RTUINT64U uTmpRsp;
6803 RTGCPTR GCPtrTop;
6804 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6805
6806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6807 {
6808 GCPtrTop = uTmpRsp.u;
6809 uTmpRsp.u += cbItem;
6810 }
6811 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6812 {
6813 GCPtrTop = uTmpRsp.DWords.dw0;
6814 uTmpRsp.DWords.dw0 += cbItem;
6815 }
6816 else
6817 {
6818 GCPtrTop = uTmpRsp.Words.w0;
6819 uTmpRsp.Words.w0 += cbItem;
6820 }
6821 *puNewRsp = uTmpRsp.u;
6822 return GCPtrTop;
6823}
6824
6825
6826/**
6827 * Calculates the effective stack address for a push of the specified size as
6828 * well as the new temporary RSP value (upper bits may be masked).
6829 *
6830 * @returns Effective stack address for the push.
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 * @param pTmpRsp The temporary stack pointer. This is updated.
6833 * @param   cbItem              The size of the stack item to push.
6834 */
6835DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6836{
6837 RTGCPTR GCPtrTop;
6838
6839 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6840 GCPtrTop = pTmpRsp->u -= cbItem;
6841 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6842 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6843 else
6844 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6845 return GCPtrTop;
6846}
6847
6848
6849/**
6850 * Gets the effective stack address for a pop of the specified size and
6851 * calculates and updates the temporary RSP.
6852 *
6853 * @returns Current stack pointer.
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 * @param pTmpRsp The temporary stack pointer. This is updated.
6856 * @param cbItem The size of the stack item to pop.
6857 */
6858DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6859{
6860 RTGCPTR GCPtrTop;
6861 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6862 {
6863 GCPtrTop = pTmpRsp->u;
6864 pTmpRsp->u += cbItem;
6865 }
6866 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6867 {
6868 GCPtrTop = pTmpRsp->DWords.dw0;
6869 pTmpRsp->DWords.dw0 += cbItem;
6870 }
6871 else
6872 {
6873 GCPtrTop = pTmpRsp->Words.w0;
6874 pTmpRsp->Words.w0 += cbItem;
6875 }
6876 return GCPtrTop;
6877}
6878
6879/** @} */
6880
6881
6882/** @name FPU access and helpers.
6883 *
6884 * @{
6885 */
6886
6887
6888/**
6889 * Hook for preparing to use the host FPU.
6890 *
6891 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6896{
6897#ifdef IN_RING3
6898 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6899#else
6900 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6901#endif
6902 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6903}
6904
6905
6906/**
6907 * Hook for preparing to use the host FPU for SSE.
6908 *
6909 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6910 *
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 */
6913DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6914{
6915 iemFpuPrepareUsage(pVCpu);
6916}
6917
6918
6919/**
6920 * Hook for preparing to use the host FPU for AVX.
6921 *
6922 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6923 *
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 */
6926DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6927{
6928 iemFpuPrepareUsage(pVCpu);
6929}
6930
6931
6932/**
6933 * Hook for actualizing the guest FPU state before the interpreter reads it.
6934 *
6935 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6940{
6941#ifdef IN_RING3
6942 NOREF(pVCpu);
6943#else
6944 CPUMRZFpuStateActualizeForRead(pVCpu);
6945#endif
6946 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6947}
6948
6949
6950/**
6951 * Hook for actualizing the guest FPU state before the interpreter changes it.
6952 *
6953 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 */
6957DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6958{
6959#ifdef IN_RING3
6960 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6961#else
6962 CPUMRZFpuStateActualizeForChange(pVCpu);
6963#endif
6964 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6965}
6966
6967
6968/**
6969 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6970 * only.
6971 *
6972 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 */
6976DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6977{
6978#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6979 NOREF(pVCpu);
6980#else
6981 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6982#endif
6983 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6984}
6985
6986
6987/**
6988 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6989 * read+write.
6990 *
6991 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 */
6995DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6996{
6997#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6998 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6999#else
7000 CPUMRZFpuStateActualizeForChange(pVCpu);
7001#endif
7002 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7003
7004 /* Make sure any changes are loaded the next time around. */
7005 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7006}
7007
7008
7009/**
7010 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7011 * only.
7012 *
7013 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7014 *
7015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7016 */
7017DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7018{
7019#ifdef IN_RING3
7020 NOREF(pVCpu);
7021#else
7022 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7023#endif
7024 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7025}
7026
7027
7028/**
7029 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7030 * read+write.
7031 *
7032 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 */
7036DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7037{
7038#ifdef IN_RING3
7039 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7040#else
7041 CPUMRZFpuStateActualizeForChange(pVCpu);
7042#endif
7043 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7044
7045 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7046 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7047}
7048
7049
7050/**
7051 * Stores a QNaN value into a FPU register.
7052 *
7053 * @param pReg Pointer to the register.
7054 */
7055DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7056{
7057 pReg->au32[0] = UINT32_C(0x00000000);
7058 pReg->au32[1] = UINT32_C(0xc0000000);
7059 pReg->au16[4] = UINT16_C(0xffff);
7060}
7061
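
/*
 * Illustrative note: the three stores above produce the 80-bit "QNaN floating
 * point indefinite" value used for masked responses:
 *
 *      sign/exponent (au16[4])          = 0xffff
 *      mantissa      (au32[1], au32[0]) = 0xc0000000, 0x00000000
 *
 * i.e. negative QNaN with only the integer bit and the top fraction bit set.
 */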
7062
7063/**
7064 * Updates the FOP, FPUCS and FPUIP registers.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 * @param pFpuCtx The FPU context.
7068 */
7069DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7070{
7071 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7072 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7073    /** @todo x87.CS and FPUIP need to be kept separately. */
7074 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7075 {
7076 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7077 * happens in real mode here based on the fnsave and fnstenv images. */
7078 pFpuCtx->CS = 0;
7079 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7080 }
7081 else if (!IEM_IS_LONG_MODE(pVCpu))
7082 {
7083 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7084 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7085 }
7086 else
7087 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7088}
7089
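
/*
 * Worked example (illustrative only): in real mode with CS=0x1234 and
 * EIP=0x0010, the worker above records FPUIP = 0x0010 | (0x1234 << 4)
 * = 0x00012350 and CS = 0, mimicking the linear-address style image that
 * FNSAVE/FNSTENV produce in real mode.  In protected mode CS and EIP are
 * stored separately, and in long mode the full 64-bit RIP overlays the
 * FPUIP/CS fields (the 64-bit FPU instruction pointer image).
 */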
7090
7091/**
7092 * Updates the x87.DS and FPUDP registers.
7093 *
7094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7095 * @param pFpuCtx The FPU context.
7096 * @param iEffSeg The effective segment register.
7097 * @param GCPtrEff The effective address relative to @a iEffSeg.
7098 */
7099DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7100{
7101 RTSEL sel;
7102 switch (iEffSeg)
7103 {
7104 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7105 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7106 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7107 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7108 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7109 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7110 default:
7111 AssertMsgFailed(("%d\n", iEffSeg));
7112 sel = pVCpu->cpum.GstCtx.ds.Sel;
7113 }
7114    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7115 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7116 {
7117 pFpuCtx->DS = 0;
7118 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7119 }
7120 else if (!IEM_IS_LONG_MODE(pVCpu))
7121 {
7122 pFpuCtx->DS = sel;
7123 pFpuCtx->FPUDP = GCPtrEff;
7124 }
7125 else
7126 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
7127}
7128
7129
7130/**
7131 * Rotates the stack registers in the push direction.
7132 *
7133 * @param pFpuCtx The FPU context.
7134 * @remarks This is a complete waste of time, but fxsave stores the registers in
7135 * stack order.
7136 */
7137DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7138{
7139 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7140 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7141 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7142 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7143 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7144 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7145 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7146 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7147 pFpuCtx->aRegs[0].r80 = r80Tmp;
7148}
7149
7150
7151/**
7152 * Rotates the stack registers in the pop direction.
7153 *
7154 * @param pFpuCtx The FPU context.
7155 * @remarks This is a complete waste of time, but fxsave stores the registers in
7156 * stack order.
7157 */
7158DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7159{
7160 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7161 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7162 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7163 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7164 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7165 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7166 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7167 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7168 pFpuCtx->aRegs[7].r80 = r80Tmp;
7169}
7170
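
/*
 * Illustrative note: aRegs[] always holds the registers in *stack* order, so
 * aRegs[0] is ST(0) for the current TOP.  The two rotate helpers above keep
 * that invariant when TOP changes, e.g. for a push:
 *
 *      // before push (TOP=n):   aRegs[0]=ST(0) ... aRegs[7]=ST(7)
 *      // after  push (TOP=n-1): the old aRegs[7] slot holds the new ST(0),
 *      //                        which iemFpuRotateStackPush moves to aRegs[0].
 */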
7171
7172/**
7173 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7174 * exception prevents it.
7175 *
7176 * @param pResult The FPU operation result to push.
7177 * @param pFpuCtx The FPU context.
7178 */
7179IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7180{
7181 /* Update FSW and bail if there are pending exceptions afterwards. */
7182 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7183 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7184 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7185 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7186 {
7187 pFpuCtx->FSW = fFsw;
7188 return;
7189 }
7190
7191 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7192 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7193 {
7194 /* All is fine, push the actual value. */
7195 pFpuCtx->FTW |= RT_BIT(iNewTop);
7196 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7197 }
7198 else if (pFpuCtx->FCW & X86_FCW_IM)
7199 {
7200 /* Masked stack overflow, push QNaN. */
7201 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7202 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7203 }
7204 else
7205 {
7206 /* Raise stack overflow, don't push anything. */
7207 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7208 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7209 return;
7210 }
7211
7212 fFsw &= ~X86_FSW_TOP_MASK;
7213 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7214 pFpuCtx->FSW = fFsw;
7215
7216 iemFpuRotateStackPush(pFpuCtx);
7217}
7218
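
/*
 * Worked example (illustrative only): with TOP=0 the expression
 * (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK gives iNewTop = 7, i.e.
 * adding 7 modulo 8 is the same as decrementing TOP by one.  The pushed value
 * is written to aRegs[7] first and then rotated into the ST(0) slot by
 * iemFpuRotateStackPush.
 */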
7219
7220/**
7221 * Stores a result in a FPU register and updates the FSW and FTW.
7222 *
7223 * @param pFpuCtx The FPU context.
7224 * @param pResult The result to store.
7225 * @param iStReg Which FPU register to store it in.
7226 */
7227IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7228{
7229 Assert(iStReg < 8);
7230 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7231 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7232 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7233 pFpuCtx->FTW |= RT_BIT(iReg);
7234 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7235}
7236
7237
7238/**
7239 * Only updates the FPU status word (FSW) with the result of the current
7240 * instruction.
7241 *
7242 * @param pFpuCtx The FPU context.
7243 * @param u16FSW The FSW output of the current instruction.
7244 */
7245IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7246{
7247 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7248 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7249}
7250
7251
7252/**
7253 * Pops one item off the FPU stack if no pending exception prevents it.
7254 *
7255 * @param pFpuCtx The FPU context.
7256 */
7257IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7258{
7259 /* Check pending exceptions. */
7260 uint16_t uFSW = pFpuCtx->FSW;
7261 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7262 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7263 return;
7264
7265    /* TOP++ (the +9 below is the same as +1 modulo 8). */
7266 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7267 uFSW &= ~X86_FSW_TOP_MASK;
7268 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7269 pFpuCtx->FSW = uFSW;
7270
7271 /* Mark the previous ST0 as empty. */
7272 iOldTop >>= X86_FSW_TOP_SHIFT;
7273 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7274
7275 /* Rotate the registers. */
7276 iemFpuRotateStackPop(pFpuCtx);
7277}
7278
7279
7280/**
7281 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7282 *
7283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7284 * @param pResult The FPU operation result to push.
7285 */
7286IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7287{
7288 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7289 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7290 iemFpuMaybePushResult(pResult, pFpuCtx);
7291}
7292
7293
7294/**
7295 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7296 * and sets FPUDP and FPUDS.
7297 *
7298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7299 * @param pResult The FPU operation result to push.
7300 * @param iEffSeg The effective segment register.
7301 * @param GCPtrEff The effective address relative to @a iEffSeg.
7302 */
7303IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7304{
7305 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7306 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7307 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7308 iemFpuMaybePushResult(pResult, pFpuCtx);
7309}
7310
7311
7312/**
7313 * Replaces ST0 with the first value and pushes the second onto the FPU
7314 * stack, unless a pending exception prevents it.
7315 *
7316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7317 * @param pResult The FPU operation result to store and push.
7318 */
7319IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7320{
7321 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7322 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7323
7324 /* Update FSW and bail if there are pending exceptions afterwards. */
7325 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7326 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7327 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7328 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7329 {
7330 pFpuCtx->FSW = fFsw;
7331 return;
7332 }
7333
7334 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7335 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7336 {
7337 /* All is fine, push the actual value. */
7338 pFpuCtx->FTW |= RT_BIT(iNewTop);
7339 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7340 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7341 }
7342 else if (pFpuCtx->FCW & X86_FCW_IM)
7343 {
7344 /* Masked stack overflow, push QNaN. */
7345 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7346 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7347 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7348 }
7349 else
7350 {
7351 /* Raise stack overflow, don't push anything. */
7352 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7353 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7354 return;
7355 }
7356
7357 fFsw &= ~X86_FSW_TOP_MASK;
7358 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7359 pFpuCtx->FSW = fFsw;
7360
7361 iemFpuRotateStackPush(pFpuCtx);
7362}
7363
7364
7365/**
7366 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7367 * FOP.
7368 *
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param pResult The result to store.
7371 * @param iStReg Which FPU register to store it in.
7372 */
7373IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7374{
7375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7377 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7378}
7379
7380
7381/**
7382 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7383 * FOP, and then pops the stack.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param pResult The result to store.
7387 * @param iStReg Which FPU register to store it in.
7388 */
7389IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7390{
7391 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7392 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7393 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7394 iemFpuMaybePopOne(pFpuCtx);
7395}
7396
7397
7398/**
7399 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7400 * FPUDP, and FPUDS.
7401 *
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param pResult The result to store.
7404 * @param iStReg Which FPU register to store it in.
7405 * @param iEffSeg The effective memory operand selector register.
7406 * @param GCPtrEff The effective memory operand offset.
7407 */
7408IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7409 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7410{
7411 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7412 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7413 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7414 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7415}
7416
7417
7418/**
7419 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7420 * FPUDP, and FPUDS, and then pops the stack.
7421 *
7422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7423 * @param pResult The result to store.
7424 * @param iStReg Which FPU register to store it in.
7425 * @param iEffSeg The effective memory operand selector register.
7426 * @param GCPtrEff The effective memory operand offset.
7427 */
7428IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7429 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7430{
7431 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7432 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7433 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7434 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7435 iemFpuMaybePopOne(pFpuCtx);
7436}
7437
7438
7439/**
7440 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7441 *
7442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7443 */
7444IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7445{
7446 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7447 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7448}
7449
7450
7451/**
7452 * Marks the specified stack register as free (for FFREE).
7453 *
7454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7455 * @param iStReg The register to free.
7456 */
7457IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7458{
7459 Assert(iStReg < 8);
7460 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7461 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7462 pFpuCtx->FTW &= ~RT_BIT(iReg);
7463}
7464
7465
7466/**
7467 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7468 *
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 */
7471IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7472{
7473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7474 uint16_t uFsw = pFpuCtx->FSW;
7475 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7476 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7477 uFsw &= ~X86_FSW_TOP_MASK;
7478 uFsw |= uTop;
7479 pFpuCtx->FSW = uFsw;
7480}
7481
7482
7483/**
7484 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7485 *
7486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7487 */
7488IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7489{
7490 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7491 uint16_t uFsw = pFpuCtx->FSW;
7492 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7493 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7494 uFsw &= ~X86_FSW_TOP_MASK;
7495 uFsw |= uTop;
7496 pFpuCtx->FSW = uFsw;
7497}
7498
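
/*
 * Illustrative note: both helpers above work modulo 8 on the 3-bit TOP field,
 * so adding (1 << X86_FSW_TOP_SHIFT) increments TOP (pop direction) while
 * adding (7 << X86_FSW_TOP_SHIFT) is the same as subtracting one (push
 * direction).  E.g. TOP=0 becomes 7 after iemFpuStackDecTop and 1 after
 * iemFpuStackIncTop.
 */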
7499
7500/**
7501 * Updates the FSW, FOP, FPUIP, and FPUCS.
7502 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7504 * @param u16FSW The FSW from the current instruction.
7505 */
7506IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7507{
7508 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7509 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7510 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7511}
7512
7513
7514/**
7515 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7516 *
7517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7518 * @param u16FSW The FSW from the current instruction.
7519 */
7520IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7521{
7522 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7524 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7525 iemFpuMaybePopOne(pFpuCtx);
7526}
7527
7528
7529/**
7530 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7531 *
7532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7533 * @param u16FSW The FSW from the current instruction.
7534 * @param iEffSeg The effective memory operand selector register.
7535 * @param GCPtrEff The effective memory operand offset.
7536 */
7537IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7538{
7539 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7540 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7541 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7542 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7543}
7544
7545
7546/**
7547 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7548 *
7549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7550 * @param u16FSW The FSW from the current instruction.
7551 */
7552IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7553{
7554 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7555 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7556 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7557 iemFpuMaybePopOne(pFpuCtx);
7558 iemFpuMaybePopOne(pFpuCtx);
7559}
7560
7561
7562/**
7563 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7564 *
7565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7566 * @param u16FSW The FSW from the current instruction.
7567 * @param iEffSeg The effective memory operand selector register.
7568 * @param GCPtrEff The effective memory operand offset.
7569 */
7570IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7571{
7572 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7573 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7574 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7575 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7576 iemFpuMaybePopOne(pFpuCtx);
7577}
7578
7579
7580/**
7581 * Worker routine for raising an FPU stack underflow exception.
7582 *
7583 * @param pFpuCtx The FPU context.
7584 * @param iStReg The stack register being accessed.
7585 */
7586IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7587{
7588 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7589 if (pFpuCtx->FCW & X86_FCW_IM)
7590 {
7591 /* Masked underflow. */
7592 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7593 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7594 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7595 if (iStReg != UINT8_MAX)
7596 {
7597 pFpuCtx->FTW |= RT_BIT(iReg);
7598 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7599 }
7600 }
7601 else
7602 {
7603 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7604 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7605 }
7606}
7607
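
/*
 * Worked example (illustrative only): for an instruction like fsqrt executed
 * with ST(0) empty and FCW.IM=1, the worker above tags the destination as
 * valid and loads it with the QNaN indefinite while setting IE+SF; with
 * FCW.IM=0 it sets IE+SF+ES+B instead and leaves the register stack
 * untouched, so the #MF is delivered on the next waiting FP instruction.
 */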
7608
7609/**
7610 * Raises a FPU stack underflow exception.
7611 *
7612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7613 * @param iStReg The destination register that should be loaded
7614 * with QNaN if \#IS is not masked. Specify
7615 * UINT8_MAX if none (like for fcom).
7616 */
7617DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7618{
7619 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7620 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7621 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7622}
7623
7624
7625DECL_NO_INLINE(IEM_STATIC, void)
7626iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7627{
7628 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7629 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7630 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7631 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7632}
7633
7634
7635DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7636{
7637 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7638 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7639 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7640 iemFpuMaybePopOne(pFpuCtx);
7641}
7642
7643
7644DECL_NO_INLINE(IEM_STATIC, void)
7645iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7646{
7647 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7648 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7649 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7650 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7651 iemFpuMaybePopOne(pFpuCtx);
7652}
7653
7654
7655DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7656{
7657 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7658 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7659 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7660 iemFpuMaybePopOne(pFpuCtx);
7661 iemFpuMaybePopOne(pFpuCtx);
7662}
7663
7664
7665DECL_NO_INLINE(IEM_STATIC, void)
7666iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7667{
7668 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7669 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7670
7671 if (pFpuCtx->FCW & X86_FCW_IM)
7672 {
7673        /* Masked underflow - Push QNaN. */
7674 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7675 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7676 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7677 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7678 pFpuCtx->FTW |= RT_BIT(iNewTop);
7679 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7680 iemFpuRotateStackPush(pFpuCtx);
7681 }
7682 else
7683 {
7684 /* Exception pending - don't change TOP or the register stack. */
7685 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7686 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7687 }
7688}
7689
7690
7691DECL_NO_INLINE(IEM_STATIC, void)
7692iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7693{
7694 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7695 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7696
7697 if (pFpuCtx->FCW & X86_FCW_IM)
7698 {
7699        /* Masked underflow - Push QNaN. */
7700 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7701 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7702 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7703 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7704 pFpuCtx->FTW |= RT_BIT(iNewTop);
7705 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7706 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7707 iemFpuRotateStackPush(pFpuCtx);
7708 }
7709 else
7710 {
7711 /* Exception pending - don't change TOP or the register stack. */
7712 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7713 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7714 }
7715}
7716
7717
7718/**
7719 * Worker routine for raising an FPU stack overflow exception on a push.
7720 *
7721 * @param pFpuCtx The FPU context.
7722 */
7723IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7724{
7725 if (pFpuCtx->FCW & X86_FCW_IM)
7726 {
7727 /* Masked overflow. */
7728 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7729 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7730 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7731 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7732 pFpuCtx->FTW |= RT_BIT(iNewTop);
7733 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7734 iemFpuRotateStackPush(pFpuCtx);
7735 }
7736 else
7737 {
7738 /* Exception pending - don't change TOP or the register stack. */
7739 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7740 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7741 }
7742}
7743
7744
7745/**
7746 * Raises an FPU stack overflow exception on a push.
7747 *
7748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7749 */
7750DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7751{
7752 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7753 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7754 iemFpuStackPushOverflowOnly(pFpuCtx);
7755}
7756
7757
7758/**
7759 * Raises an FPU stack overflow exception on a push with a memory operand.
7760 *
7761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7762 * @param iEffSeg The effective memory operand selector register.
7763 * @param GCPtrEff The effective memory operand offset.
7764 */
7765DECL_NO_INLINE(IEM_STATIC, void)
7766iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7767{
7768 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7769 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7770 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7771 iemFpuStackPushOverflowOnly(pFpuCtx);
7772}
7773
7774
7775IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7776{
7777 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7778 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7779 if (pFpuCtx->FTW & RT_BIT(iReg))
7780 return VINF_SUCCESS;
7781 return VERR_NOT_FOUND;
7782}
7783
7784
7785IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7786{
7787 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7788 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7789 if (pFpuCtx->FTW & RT_BIT(iReg))
7790 {
7791 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7792 return VINF_SUCCESS;
7793 }
7794 return VERR_NOT_FOUND;
7795}
7796
7797
7798IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7799 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7800{
7801 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7802 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7803 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7804 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7805 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7806 {
7807 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7808 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7809 return VINF_SUCCESS;
7810 }
7811 return VERR_NOT_FOUND;
7812}
7813
7814
7815IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7816{
7817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7818 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7819 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7820 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7821 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7822 {
7823 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7824 return VINF_SUCCESS;
7825 }
7826 return VERR_NOT_FOUND;
7827}
7828
7829
7830/**
7831 * Updates the FPU exception status after FCW is changed.
7832 *
7833 * @param pFpuCtx The FPU context.
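 * @remarks For instance, unmasking \#IE (clearing FCW.IM) while FSW.IE is set
 *          will set ES and B here; masking it again clears them.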
7834 */
7835IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7836{
7837 uint16_t u16Fsw = pFpuCtx->FSW;
7838 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7839 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7840 else
7841 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7842 pFpuCtx->FSW = u16Fsw;
7843}
7844
7845
7846/**
7847 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7848 *
7849 * @returns The full FTW.
7850 * @param pFpuCtx The FPU context.
7851 */
7852IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7853{
7854 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7855 uint16_t u16Ftw = 0;
7856 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7857 for (unsigned iSt = 0; iSt < 8; iSt++)
7858 {
7859 unsigned const iReg = (iSt + iTop) & 7;
7860 if (!(u8Ftw & RT_BIT(iReg)))
7861 u16Ftw |= 3 << (iReg * 2); /* empty */
7862 else
7863 {
7864 uint16_t uTag;
7865 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7866 if (pr80Reg->s.uExponent == 0x7fff)
7867 uTag = 2; /* Exponent is all 1's => Special. */
7868 else if (pr80Reg->s.uExponent == 0x0000)
7869 {
7870 if (pr80Reg->s.u64Mantissa == 0x0000)
7871 uTag = 1; /* All bits are zero => Zero. */
7872 else
7873 uTag = 2; /* Must be special. */
7874 }
7875 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7876 uTag = 0; /* Valid. */
7877 else
7878 uTag = 2; /* Must be special. */
7879
7880            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7881 }
7882 }
7883
7884 return u16Ftw;
7885}
7886
7887
7888/**
7889 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7890 *
7891 * @returns The compressed FTW.
7892 * @param u16FullFtw The full FTW to convert.
7893 */
7894IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7895{
7896 uint8_t u8Ftw = 0;
7897 for (unsigned i = 0; i < 8; i++)
7898 {
7899 if ((u16FullFtw & 3) != 3 /*empty*/)
7900 u8Ftw |= RT_BIT(i);
7901 u16FullFtw >>= 2;
7902 }
7903
7904 return u8Ftw;
7905}
7906
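/*
 * Worked example for the two conversions above: the full FTW uses two bits per
 * register (00=valid, 01=zero, 10=special, 11=empty), while the abridged FXSAVE
 * form uses one bit per register (1=in use).  After FNINIT + FLD1 the TOP is 7
 * and only register 7 is occupied, so the full tag word is 0x3fff and the
 * abridged one is 0x80.
 */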
7907/** @} */
7908
7909
7910/** @name Memory access.
7911 *
7912 * @{
7913 */
7914
7915
7916/**
7917 * Updates the IEMCPU::cbWritten counter if applicable.
7918 *
7919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7920 * @param fAccess The access being accounted for.
7921 * @param cbMem The access size.
7922 */
7923DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7924{
7925 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7926 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7927 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7928}
7929
7930
7931/**
7932 * Checks if the given segment can be written to, raising the appropriate
7933 * exception if not.
7934 *
7935 * @returns VBox strict status code.
7936 *
7937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7938 * @param pHid Pointer to the hidden register.
7939 * @param iSegReg The register number.
7940 * @param pu64BaseAddr Where to return the base address to use for the
7941 * segment. (In 64-bit code it may differ from the
7942 * base in the hidden segment.)
7943 */
7944IEM_STATIC VBOXSTRICTRC
7945iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7946{
7947 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7948
7949 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7950 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7951 else
7952 {
7953 if (!pHid->Attr.n.u1Present)
7954 {
7955 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7956 AssertRelease(uSel == 0);
7957 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7958 return iemRaiseGeneralProtectionFault0(pVCpu);
7959 }
7960
7961 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7962 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7963 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7964 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7965 *pu64BaseAddr = pHid->u64Base;
7966 }
7967 return VINF_SUCCESS;
7968}
7969
7970
7971/**
7972 * Checks if the given segment can be read from, raising the appropriate
7973 * exception if not.
7974 *
7975 * @returns VBox strict status code.
7976 *
7977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7978 * @param pHid Pointer to the hidden register.
7979 * @param iSegReg The register number.
7980 * @param pu64BaseAddr Where to return the base address to use for the
7981 * segment. (In 64-bit code it may differ from the
7982 * base in the hidden segment.)
7983 */
7984IEM_STATIC VBOXSTRICTRC
7985iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7986{
7987 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7988
7989 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7990 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7991 else
7992 {
7993 if (!pHid->Attr.n.u1Present)
7994 {
7995 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7996 AssertRelease(uSel == 0);
7997 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7998 return iemRaiseGeneralProtectionFault0(pVCpu);
7999 }
8000
8001 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8002 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8003 *pu64BaseAddr = pHid->u64Base;
8004 }
8005 return VINF_SUCCESS;
8006}
8007
8008
8009/**
8010 * Applies the segment limit, base and attributes.
8011 *
8012 * This may raise a \#GP or \#SS.
8013 *
8014 * @returns VBox strict status code.
8015 *
8016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8017 * @param fAccess The kind of access which is being performed.
8018 * @param iSegReg The index of the segment register to apply.
8019 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8020 * TSS, ++).
8021 * @param cbMem The access size.
8022 * @param pGCPtrMem Pointer to the guest memory address to apply
8023 * segmentation to. Input and output parameter.
8024 */
8025IEM_STATIC VBOXSTRICTRC
8026iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8027{
8028 if (iSegReg == UINT8_MAX)
8029 return VINF_SUCCESS;
8030
8031 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8032 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8033 switch (pVCpu->iem.s.enmCpuMode)
8034 {
8035 case IEMMODE_16BIT:
8036 case IEMMODE_32BIT:
8037 {
8038 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8039 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8040
8041 if ( pSel->Attr.n.u1Present
8042 && !pSel->Attr.n.u1Unusable)
8043 {
8044 Assert(pSel->Attr.n.u1DescType);
8045 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8046 {
8047 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8048 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8049 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8050
8051 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8052 {
8053 /** @todo CPL check. */
8054 }
8055
8056 /*
8057 * There are two kinds of data selectors, normal and expand down.
8058 */
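                    /* E.g. an expand-up selector with u32Limit=0xfff faults a 4 byte access
                       at offset 0xffe, since the last byte (offset 0x1001) is beyond the limit. */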
8059 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8060 {
8061 if ( GCPtrFirst32 > pSel->u32Limit
8062 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8063 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8064 }
8065 else
8066 {
8067 /*
8068 * The upper boundary is defined by the B bit, not the G bit!
8069 */
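                        /* E.g. with u32Limit=0xfff and B=0 the valid offsets are 0x1000 thru 0xffff. */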
8070 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8071 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8072 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8073 }
8074 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8075 }
8076 else
8077 {
8078
8079 /*
8080                 * Code selectors can usually be used to read through; writing is
8081                 * only permitted in real and V8086 mode.
8082 */
8083 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8084 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8085 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8086 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8087 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8088
8089 if ( GCPtrFirst32 > pSel->u32Limit
8090 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8091 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8092
8093 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8094 {
8095 /** @todo CPL check. */
8096 }
8097
8098 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8099 }
8100 }
8101 else
8102 return iemRaiseGeneralProtectionFault0(pVCpu);
8103 return VINF_SUCCESS;
8104 }
8105
8106 case IEMMODE_64BIT:
8107 {
8108 RTGCPTR GCPtrMem = *pGCPtrMem;
8109 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8110 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8111
8112 Assert(cbMem >= 1);
8113 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8114 return VINF_SUCCESS;
8115 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8116 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8117 return iemRaiseGeneralProtectionFault0(pVCpu);
8118 }
8119
8120 default:
8121 AssertFailedReturn(VERR_IEM_IPE_7);
8122 }
8123}
8124
8125
8126/**
8127 * Translates a virtual address to a physical address and checks if we
8128 * can access the page as specified.
8129 *
8130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8131 * @param GCPtrMem The virtual address.
8132 * @param fAccess The intended access.
8133 * @param pGCPhysMem Where to return the physical address.
8134 */
8135IEM_STATIC VBOXSTRICTRC
8136iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8137{
8138 /** @todo Need a different PGM interface here. We're currently using
8139 * generic / REM interfaces. this won't cut it for R0. */
8140 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8141 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8142 * here. */
8143 PGMPTWALK Walk;
8144 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8145 if (RT_FAILURE(rc))
8146 {
8147 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8148 /** @todo Check unassigned memory in unpaged mode. */
8149 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8150 *pGCPhysMem = NIL_RTGCPHYS;
8151 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8152 }
8153
8154    /* If the page is writable, user accessible and does not have the no-exec bit
8155       set, all access is allowed.  Otherwise we'll have to check more carefully... */
8156 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8157 {
8158 /* Write to read only memory? */
8159 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8160 && !(Walk.fEffective & X86_PTE_RW)
8161 && ( ( pVCpu->iem.s.uCpl == 3
8162 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8163 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8164 {
8165 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8166 *pGCPhysMem = NIL_RTGCPHYS;
8167 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8168 }
8169
8170 /* Kernel memory accessed by userland? */
8171 if ( !(Walk.fEffective & X86_PTE_US)
8172 && pVCpu->iem.s.uCpl == 3
8173 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8174 {
8175 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8176 *pGCPhysMem = NIL_RTGCPHYS;
8177 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8178 }
8179
8180 /* Executing non-executable memory? */
8181 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8182 && (Walk.fEffective & X86_PTE_PAE_NX)
8183 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8184 {
8185 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8186 *pGCPhysMem = NIL_RTGCPHYS;
8187 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8188 VERR_ACCESS_DENIED);
8189 }
8190 }
8191
8192 /*
8193 * Set the dirty / access flags.
8194     * ASSUMES this is set when the address is translated rather than on commit...
8195 */
8196 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8197 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8198 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8199 {
8200 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8201 AssertRC(rc2);
8202 }
8203
8204 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK);
8205 *pGCPhysMem = GCPhys;
8206 return VINF_SUCCESS;
8207}
8208
8209
8210
8211/**
8212 * Maps a physical page.
8213 *
8214 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8216 * @param GCPhysMem The physical address.
8217 * @param fAccess The intended access.
8218 * @param ppvMem Where to return the mapping address.
8219 * @param pLock The PGM lock.
8220 */
8221IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8222{
8223#ifdef IEM_LOG_MEMORY_WRITES
8224 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8225 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8226#endif
8227
8228 /** @todo This API may require some improving later. A private deal with PGM
8229     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8230 * living in PGM, but with publicly accessible inlined access methods
8231 * could perhaps be an even better solution. */
8232 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8233 GCPhysMem,
8234 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8235 pVCpu->iem.s.fBypassHandlers,
8236 ppvMem,
8237 pLock);
8238 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8239 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8240
8241 return rc;
8242}
8243
8244
8245/**
8246 * Unmap a page previously mapped by iemMemPageMap.
8247 *
8248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8249 * @param GCPhysMem The physical address.
8250 * @param fAccess The intended access.
8251 * @param pvMem What iemMemPageMap returned.
8252 * @param pLock The PGM lock.
8253 */
8254DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8255{
8256 NOREF(pVCpu);
8257 NOREF(GCPhysMem);
8258 NOREF(fAccess);
8259 NOREF(pvMem);
8260 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8261}
8262
8263
8264/**
8265 * Looks up a memory mapping entry.
8266 *
8267 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8269 * @param pvMem The memory address.
8270 * @param fAccess            The access type and purpose to match.
8271 */
8272DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8273{
8274 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8275 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8276 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8277 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8278 return 0;
8279 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8280 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8281 return 1;
8282 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8283 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8284 return 2;
8285 return VERR_NOT_FOUND;
8286}
8287
8288
8289/**
8290 * Finds a free memmap entry when using iNextMapping doesn't work.
8291 *
8292 * @returns Memory mapping index, 1024 on failure.
8293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8294 */
8295IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8296{
8297 /*
8298 * The easy case.
8299 */
8300 if (pVCpu->iem.s.cActiveMappings == 0)
8301 {
8302 pVCpu->iem.s.iNextMapping = 1;
8303 return 0;
8304 }
8305
8306 /* There should be enough mappings for all instructions. */
8307 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8308
8309 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8310 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8311 return i;
8312
8313 AssertFailedReturn(1024);
8314}
8315
8316
8317/**
8318 * Commits a bounce buffer that needs writing back and unmaps it.
8319 *
8320 * @returns Strict VBox status code.
8321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8322 * @param iMemMap The index of the buffer to commit.
8323 * @param fPostponeFail     Whether we can postpone write failures to ring-3.
8324 * Always false in ring-3, obviously.
8325 */
8326IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8327{
8328 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8329 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8330#ifdef IN_RING3
8331 Assert(!fPostponeFail);
8332 RT_NOREF_PV(fPostponeFail);
8333#endif
8334
8335 /*
8336 * Do the writing.
8337 */
8338 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8339 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8340 {
8341 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8342 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8343 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8344 if (!pVCpu->iem.s.fBypassHandlers)
8345 {
8346 /*
8347 * Carefully and efficiently dealing with access handler return
8348         * codes makes this a little bloated.
8349 */
8350 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8352 pbBuf,
8353 cbFirst,
8354 PGMACCESSORIGIN_IEM);
8355 if (rcStrict == VINF_SUCCESS)
8356 {
8357 if (cbSecond)
8358 {
8359 rcStrict = PGMPhysWrite(pVM,
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8361 pbBuf + cbFirst,
8362 cbSecond,
8363 PGMACCESSORIGIN_IEM);
8364 if (rcStrict == VINF_SUCCESS)
8365 { /* nothing */ }
8366 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8367 {
8368 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8371 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8372 }
8373#ifndef IN_RING3
8374 else if (fPostponeFail)
8375 {
8376 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8378 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8379 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8380 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8381 return iemSetPassUpStatus(pVCpu, rcStrict);
8382 }
8383#endif
8384 else
8385 {
8386 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8388 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8389 return rcStrict;
8390 }
8391 }
8392 }
8393 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8394 {
8395 if (!cbSecond)
8396 {
8397 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8398 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8399 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8400 }
8401 else
8402 {
8403 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8405 pbBuf + cbFirst,
8406 cbSecond,
8407 PGMACCESSORIGIN_IEM);
8408 if (rcStrict2 == VINF_SUCCESS)
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8413 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8414 }
8415 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8416 {
8417 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8420 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8421 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8422 }
8423#ifndef IN_RING3
8424 else if (fPostponeFail)
8425 {
8426 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8429 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8430 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8431 return iemSetPassUpStatus(pVCpu, rcStrict);
8432 }
8433#endif
8434 else
8435 {
8436 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8439 return rcStrict2;
8440 }
8441 }
8442 }
8443#ifndef IN_RING3
8444 else if (fPostponeFail)
8445 {
8446 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8447 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8449 if (!cbSecond)
8450 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8451 else
8452 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8453 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8454 return iemSetPassUpStatus(pVCpu, rcStrict);
8455 }
8456#endif
8457 else
8458 {
8459 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8462 return rcStrict;
8463 }
8464 }
8465 else
8466 {
8467 /*
8468 * No access handlers, much simpler.
8469 */
8470 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8471 if (RT_SUCCESS(rc))
8472 {
8473 if (cbSecond)
8474 {
8475 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8476 if (RT_SUCCESS(rc))
8477 { /* likely */ }
8478 else
8479 {
8480 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8483 return rc;
8484 }
8485 }
8486 }
8487 else
8488 {
8489 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8492 return rc;
8493 }
8494 }
8495 }
8496
8497#if defined(IEM_LOG_MEMORY_WRITES)
8498 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8499 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8500 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8501 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8502 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8503 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8504
8505 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8506 g_cbIemWrote = cbWrote;
8507 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8508#endif
8509
8510 /*
8511 * Free the mapping entry.
8512 */
8513 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8514 Assert(pVCpu->iem.s.cActiveMappings != 0);
8515 pVCpu->iem.s.cActiveMappings--;
8516 return VINF_SUCCESS;
8517}
8518
8519
8520/**
8521 * iemMemMap worker that deals with a request crossing pages.
8522 */
8523IEM_STATIC VBOXSTRICTRC
8524iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8525{
8526 /*
8527 * Do the address translations.
8528 */
8529 RTGCPHYS GCPhysFirst;
8530 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8531 if (rcStrict != VINF_SUCCESS)
8532 return rcStrict;
8533
8534 RTGCPHYS GCPhysSecond;
8535 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8536 fAccess, &GCPhysSecond);
8537 if (rcStrict != VINF_SUCCESS)
8538 return rcStrict;
8539 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8540
8541 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8542
8543 /*
8544 * Read in the current memory content if it's a read, execute or partial
8545 * write access.
8546 */
8547 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8548 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8549 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
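    /* E.g. an 8 byte access at page offset 0xffc is split into cbFirstPage=4 and cbSecondPage=4. */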
8550
8551 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8552 {
8553 if (!pVCpu->iem.s.fBypassHandlers)
8554 {
8555 /*
8556 * Must carefully deal with access handler status codes here,
8557 * makes the code a bit bloated.
8558 */
8559 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8560 if (rcStrict == VINF_SUCCESS)
8561 {
8562 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8563 if (rcStrict == VINF_SUCCESS)
8564 { /*likely */ }
8565 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8566 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8567 else
8568 {
8569 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8570 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8571 return rcStrict;
8572 }
8573 }
8574 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8575 {
8576 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8577 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8578 {
8579 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8580 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8581 }
8582 else
8583 {
8584 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8585                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8586 return rcStrict2;
8587 }
8588 }
8589 else
8590 {
8591 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8592 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8593 return rcStrict;
8594 }
8595 }
8596 else
8597 {
8598 /*
8599             * No informational status codes here, much more straightforward.
8600 */
8601 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8602 if (RT_SUCCESS(rc))
8603 {
8604 Assert(rc == VINF_SUCCESS);
8605 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8606 if (RT_SUCCESS(rc))
8607 Assert(rc == VINF_SUCCESS);
8608 else
8609 {
8610 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8611 return rc;
8612 }
8613 }
8614 else
8615 {
8616 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8617 return rc;
8618 }
8619 }
8620 }
8621#ifdef VBOX_STRICT
8622 else
8623 memset(pbBuf, 0xcc, cbMem);
8624 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8625 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8626#endif
8627
8628 /*
8629 * Commit the bounce buffer entry.
8630 */
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8632 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8633 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8634 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8635 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8636 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8637 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8638 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8639 pVCpu->iem.s.cActiveMappings++;
8640
8641 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8642 *ppvMem = pbBuf;
8643 return VINF_SUCCESS;
8644}
8645
8646
8647/**
8648 * iemMemMap worker that deals with iemMemPageMap failures.
8649 */
8650IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8651 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8652{
8653 /*
8654 * Filter out conditions we can handle and the ones which shouldn't happen.
8655 */
8656 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8657 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8658 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8659 {
8660 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8661 return rcMap;
8662 }
8663 pVCpu->iem.s.cPotentialExits++;
8664
8665 /*
8666 * Read in the current memory content if it's a read, execute or partial
8667 * write access.
8668 */
8669 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8670 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8671 {
8672 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8673 memset(pbBuf, 0xff, cbMem);
8674 else
8675 {
8676 int rc;
8677 if (!pVCpu->iem.s.fBypassHandlers)
8678 {
8679 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8680 if (rcStrict == VINF_SUCCESS)
8681 { /* nothing */ }
8682 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8683 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8684 else
8685 {
8686 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8687 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8688 return rcStrict;
8689 }
8690 }
8691 else
8692 {
8693 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8694 if (RT_SUCCESS(rc))
8695 { /* likely */ }
8696 else
8697 {
8698 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8699 GCPhysFirst, rc));
8700 return rc;
8701 }
8702 }
8703 }
8704 }
8705#ifdef VBOX_STRICT
8706 else
8707 memset(pbBuf, 0xcc, cbMem);
8708#endif
8709#ifdef VBOX_STRICT
8710 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8711 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8712#endif
8713
8714 /*
8715 * Commit the bounce buffer entry.
8716 */
8717 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8718 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8719 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8720 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8721 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8722 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8723 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8724 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8725 pVCpu->iem.s.cActiveMappings++;
8726
8727 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8728 *ppvMem = pbBuf;
8729 return VINF_SUCCESS;
8730}
8731
8732
8733
8734/**
8735 * Maps the specified guest memory for the given kind of access.
8736 *
8737 * This may be using bounce buffering of the memory if it's crossing a page
8738 * boundary or if there is an access handler installed for any of it. Because
8739 * of lock prefix guarantees, we're in for some extra clutter when this
8740 * happens.
8741 *
8742 * This may raise a \#GP, \#SS, \#PF or \#AC.
8743 *
8744 * @returns VBox strict status code.
8745 *
8746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8747 * @param ppvMem Where to return the pointer to the mapped
8748 * memory.
8749 * @param cbMem The number of bytes to map. This is usually 1,
8750 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8751 * string operations it can be up to a page.
8752 * @param iSegReg The index of the segment register to use for
8753 * this access. The base and limits are checked.
8754 * Use UINT8_MAX to indicate that no segmentation
8755 * is required (for IDT, GDT and LDT accesses).
8756 * @param GCPtrMem The address of the guest memory.
8757 * @param fAccess How the memory is being accessed. The
8758 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8759 * how to map the memory, while the
8760 * IEM_ACCESS_WHAT_XXX bit is used when raising
8761 * exceptions.
8762 */
8763IEM_STATIC VBOXSTRICTRC
8764iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8765{
8766 /*
8767 * Check the input and figure out which mapping entry to use.
8768 */
8769 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8770    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8771 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8772
8773 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8774 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8775 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8776 {
8777 iMemMap = iemMemMapFindFree(pVCpu);
8778 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8779 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8780 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8781 pVCpu->iem.s.aMemMappings[2].fAccess),
8782 VERR_IEM_IPE_9);
8783 }
8784
8785 /*
8786 * Map the memory, checking that we can actually access it. If something
8787 * slightly complicated happens, fall back on bounce buffering.
8788 */
8789 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8790 if (rcStrict != VINF_SUCCESS)
8791 return rcStrict;
8792
8793 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8794 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8795
8796 RTGCPHYS GCPhysFirst;
8797 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8798 if (rcStrict != VINF_SUCCESS)
8799 return rcStrict;
8800
8801 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8802 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8803 if (fAccess & IEM_ACCESS_TYPE_READ)
8804 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8805
8806 void *pvMem;
8807 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8808 if (rcStrict != VINF_SUCCESS)
8809 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8810
8811 /*
8812 * Fill in the mapping table entry.
8813 */
8814 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8815 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8816 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8817 pVCpu->iem.s.cActiveMappings++;
8818
8819 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8820 *ppvMem = pvMem;
8821
8822 return VINF_SUCCESS;
8823}
8824
8825
8826/**
8827 * Commits the guest memory if bounce buffered and unmaps it.
8828 *
8829 * @returns Strict VBox status code.
8830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8831 * @param pvMem The mapping.
8832 * @param fAccess The kind of access.
8833 */
8834IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8835{
8836 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8837 AssertReturn(iMemMap >= 0, iMemMap);
8838
8839 /* If it's bounce buffered, we may need to write back the buffer. */
8840 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8841 {
8842 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8843 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8844 }
8845 /* Otherwise unlock it. */
8846 else
8847 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8848
8849 /* Free the entry. */
8850 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8851 Assert(pVCpu->iem.s.cActiveMappings != 0);
8852 pVCpu->iem.s.cActiveMappings--;
8853 return VINF_SUCCESS;
8854}
8855
8856#ifdef IEM_WITH_SETJMP
8857
8858/**
8859 * Maps the specified guest memory for the given kind of access, longjmp on
8860 * error.
8861 *
8862 * This may be using bounce buffering of the memory if it's crossing a page
8863 * boundary or if there is an access handler installed for any of it. Because
8864 * of lock prefix guarantees, we're in for some extra clutter when this
8865 * happens.
8866 *
8867 * This may raise a \#GP, \#SS, \#PF or \#AC.
8868 *
8869 * @returns Pointer to the mapped memory.
8870 *
8871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8872 * @param cbMem The number of bytes to map. This is usually 1,
8873 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8874 * string operations it can be up to a page.
8875 * @param iSegReg The index of the segment register to use for
8876 * this access. The base and limits are checked.
8877 * Use UINT8_MAX to indicate that no segmentation
8878 * is required (for IDT, GDT and LDT accesses).
8879 * @param GCPtrMem The address of the guest memory.
8880 * @param fAccess How the memory is being accessed. The
8881 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8882 * how to map the memory, while the
8883 * IEM_ACCESS_WHAT_XXX bit is used when raising
8884 * exceptions.
8885 */
8886IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8887{
8888 /*
8889 * Check the input and figure out which mapping entry to use.
8890 */
8891 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8892    Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8893 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8894
8895 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8896 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8897 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8898 {
8899 iMemMap = iemMemMapFindFree(pVCpu);
8900 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8901 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8902 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8903 pVCpu->iem.s.aMemMappings[2].fAccess),
8904 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8905 }
8906
8907 /*
8908 * Map the memory, checking that we can actually access it. If something
8909 * slightly complicated happens, fall back on bounce buffering.
8910 */
8911 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8912 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8913 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8914
8915 /* Crossing a page boundary? */
8916 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8917 { /* No (likely). */ }
8918 else
8919 {
8920 void *pvMem;
8921 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8922 if (rcStrict == VINF_SUCCESS)
8923 return pvMem;
8924 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8925 }
8926
8927 RTGCPHYS GCPhysFirst;
8928 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8929 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8930 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8931
8932 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8933 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8934 if (fAccess & IEM_ACCESS_TYPE_READ)
8935 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8936
8937 void *pvMem;
8938 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8939 if (rcStrict == VINF_SUCCESS)
8940 { /* likely */ }
8941 else
8942 {
8943 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8944 if (rcStrict == VINF_SUCCESS)
8945 return pvMem;
8946 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8947 }
8948
8949 /*
8950 * Fill in the mapping table entry.
8951 */
8952 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8953 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8954 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8955 pVCpu->iem.s.cActiveMappings++;
8956
8957 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8958 return pvMem;
8959}
8960
8961
8962/**
8963 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8964 *
8965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8966 * @param pvMem The mapping.
8967 * @param fAccess The kind of access.
8968 */
8969IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8970{
8971 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8972 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8973
8974 /* If it's bounce buffered, we may need to write back the buffer. */
8975 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8976 {
8977 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8978 {
8979 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8980 if (rcStrict == VINF_SUCCESS)
8981 return;
8982 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8983 }
8984 }
8985 /* Otherwise unlock it. */
8986 else
8987 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8988
8989 /* Free the entry. */
8990 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8991 Assert(pVCpu->iem.s.cActiveMappings != 0);
8992 pVCpu->iem.s.cActiveMappings--;
8993}
8994
8995#endif /* IEM_WITH_SETJMP */
8996
8997#ifndef IN_RING3
8998/**
8999 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9000 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9001 *
9002 * Allows the instruction to be completed and retired, while the IEM user will
9003 * return to ring-3 immediately afterwards and do the postponed writes there.
9004 *
9005 * @returns VBox status code (no strict statuses). Caller must check
9006 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9008 * @param pvMem The mapping.
9009 * @param fAccess The kind of access.
9010 */
9011IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9012{
9013 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9014 AssertReturn(iMemMap >= 0, iMemMap);
9015
9016 /* If it's bounce buffered, we may need to write back the buffer. */
9017 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9018 {
9019 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9020 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9021 }
9022 /* Otherwise unlock it. */
9023 else
9024 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9025
9026 /* Free the entry. */
9027 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9028 Assert(pVCpu->iem.s.cActiveMappings != 0);
9029 pVCpu->iem.s.cActiveMappings--;
9030 return VINF_SUCCESS;
9031}
9032#endif
9033
9034
9035/**
9036 * Rolls back mappings, releasing page locks and such.
9037 *
9038 * The caller shall only call this after checking cActiveMappings.
9039 *
9041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9042 */
9043IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9044{
9045 Assert(pVCpu->iem.s.cActiveMappings > 0);
9046
9047 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9048 while (iMemMap-- > 0)
9049 {
9050 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9051 if (fAccess != IEM_ACCESS_INVALID)
9052 {
9053 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9054 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9055 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9056 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9057 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9058 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9059 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9060 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9061 pVCpu->iem.s.cActiveMappings--;
9062 }
9063 }
9064}
9065
9066
9067/**
9068 * Fetches a data byte.
9069 *
9070 * @returns Strict VBox status code.
9071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9072 * @param pu8Dst Where to return the byte.
9073 * @param iSegReg The index of the segment register to use for
9074 * this access. The base and limits are checked.
9075 * @param GCPtrMem The address of the guest memory.
9076 */
9077IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9078{
9079 /* The lazy approach for now... */
9080 uint8_t const *pu8Src;
9081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9082 if (rc == VINF_SUCCESS)
9083 {
9084 *pu8Dst = *pu8Src;
9085 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9086 }
9087 return rc;
9088}
9089
9090
9091#ifdef IEM_WITH_SETJMP
9092/**
9093 * Fetches a data byte, longjmp on error.
9094 *
9095 * @returns The byte.
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 * @param iSegReg The index of the segment register to use for
9098 * this access. The base and limits are checked.
9099 * @param GCPtrMem The address of the guest memory.
9100 */
9101DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9102{
9103 /* The lazy approach for now... */
9104 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9105 uint8_t const bRet = *pu8Src;
9106 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9107 return bRet;
9108}
9109#endif /* IEM_WITH_SETJMP */
9110
9111
9112/**
9113 * Fetches a data word.
9114 *
9115 * @returns Strict VBox status code.
9116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9117 * @param pu16Dst Where to return the word.
9118 * @param iSegReg The index of the segment register to use for
9119 * this access. The base and limits are checked.
9120 * @param GCPtrMem The address of the guest memory.
9121 */
9122IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9123{
9124 /* The lazy approach for now... */
9125 uint16_t const *pu16Src;
9126 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9127 if (rc == VINF_SUCCESS)
9128 {
9129 *pu16Dst = *pu16Src;
9130 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9131 }
9132 return rc;
9133}
9134
9135
9136#ifdef IEM_WITH_SETJMP
9137/**
9138 * Fetches a data word, longjmp on error.
9139 *
9140 * @returns The word.
9141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9142 * @param iSegReg The index of the segment register to use for
9143 * this access. The base and limits are checked.
9144 * @param GCPtrMem The address of the guest memory.
9145 */
9146DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9147{
9148 /* The lazy approach for now... */
9149 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9150 uint16_t const u16Ret = *pu16Src;
9151 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9152 return u16Ret;
9153}
9154#endif
9155
9156
9157/**
9158 * Fetches a data dword.
9159 *
9160 * @returns Strict VBox status code.
9161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9162 * @param pu32Dst Where to return the dword.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint32_t const *pu32Src;
9171 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9172 if (rc == VINF_SUCCESS)
9173 {
9174 *pu32Dst = *pu32Src;
9175 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9176 }
9177 return rc;
9178}
9179
9180
9181/**
9182 * Fetches a data dword and zero extends it to a qword.
9183 *
9184 * @returns Strict VBox status code.
9185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9186 * @param pu64Dst Where to return the qword.
9187 * @param iSegReg The index of the segment register to use for
9188 * this access. The base and limits are checked.
9189 * @param GCPtrMem The address of the guest memory.
9190 */
9191IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9192{
9193 /* The lazy approach for now... */
9194 uint32_t const *pu32Src;
9195 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9196 if (rc == VINF_SUCCESS)
9197 {
9198 *pu64Dst = *pu32Src;
9199 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9200 }
9201 return rc;
9202}
9203
9204
9205#ifdef IEM_WITH_SETJMP
9206
9207IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9208{
9209 Assert(cbMem >= 1);
9210 Assert(iSegReg < X86_SREG_COUNT);
9211
9212 /*
9213 * 64-bit mode is simpler.
9214 */
9215 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9216 {
9217 if (iSegReg >= X86_SREG_FS)
9218 {
9219 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9220 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9221 GCPtrMem += pSel->u64Base;
9222 }
9223
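        /* E.g. a 4 byte access at 0x00007fffffffffff fails the check below because the
           last byte (0x0000800000000002) is not canonical, and ends up raising #GP(0). */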
9224 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9225 return GCPtrMem;
9226 }
9227 /*
9228 * 16-bit and 32-bit segmentation.
9229 */
9230 else
9231 {
9232 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9233 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9234 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9235 == X86DESCATTR_P /* data, expand up */
9236 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9237 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9238 {
9239 /* expand up */
9240 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9241 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9242 && GCPtrLast32 > (uint32_t)GCPtrMem))
9243 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9244 }
9245 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9246 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9247 {
9248 /* expand down */
9249 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9250 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9251 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9252 && GCPtrLast32 > (uint32_t)GCPtrMem))
9253 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9254 }
9255 else
9256 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9257 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9258 }
9259 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9260}
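
/*
 * Worked example for the expand-down branch above: valid offsets are the
 * ones *above* the limit, bounded by 64KiB or 4GiB depending on the D/B
 * bit. With u32Limit = 0x0fff, Attr.n.u1DefBig = 0 and a 2-byte read:
 *
 *     GCPtrMem = 0x1000:  GCPtrLast32 = 0x1002; 0x1000 > 0x0fff,
 *                         0x1002 <= 0xffff, 0x1002 > 0x1000  -> access OK.
 *     GCPtrMem = 0x0ffe:  0x0ffe > 0x0fff fails -> selector bounds fault
 *                         (#GP, or #SS for the stack segment).
 *
 * I.e. an expand-down segment excludes [0, limit] instead of including it.
 */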
9261
9262
9263IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9264{
9265 Assert(cbMem >= 1);
9266 Assert(iSegReg < X86_SREG_COUNT);
9267
9268 /*
9269 * 64-bit mode is simpler.
9270 */
9271 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9272 {
9273 if (iSegReg >= X86_SREG_FS)
9274 {
9275 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9276 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9277 GCPtrMem += pSel->u64Base;
9278 }
9279
9280 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9281 return GCPtrMem;
9282 }
9283 /*
9284 * 16-bit and 32-bit segmentation.
9285 */
9286 else
9287 {
9288 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9289 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9290 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9291 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9292 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9293 {
9294 /* expand up */
9295 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9296 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9297 && GCPtrLast32 > (uint32_t)GCPtrMem))
9298 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9299 }
9300 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9301 {
9302 /* expand down */
9303 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9304 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9305 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9306 && GCPtrLast32 > (uint32_t)GCPtrMem))
9307 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9308 }
9309 else
9310 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9311 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9312 }
9313 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9314}
9315
9316
9317/**
9318 * Fetches a data dword, longjmp on error, fallback/safe version.
9319 *
9320 * @returns The dword
9321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9322 * @param iSegReg The index of the segment register to use for
9323 * this access. The base and limits are checked.
9324 * @param GCPtrMem The address of the guest memory.
9325 */
9326IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9327{
9328 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9329 uint32_t const u32Ret = *pu32Src;
9330 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9331 return u32Ret;
9332}
9333
9334
9335/**
9336 * Fetches a data dword, longjmp on error.
9337 *
9338 * @returns The dword
9339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9340 * @param iSegReg The index of the segment register to use for
9341 * this access. The base and limits are checked.
9342 * @param GCPtrMem The address of the guest memory.
9343 */
9344DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9345{
9346# ifdef IEM_WITH_DATA_TLB
9347 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9348 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9349 {
9350 /// @todo more later.
9351 }
9352
9353 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9354# else
9355 /* The lazy approach. */
9356 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9357 uint32_t const u32Ret = *pu32Src;
9358 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9359 return u32Ret;
9360# endif
9361}
9362#endif
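
/*
 * Call-site sketch (illustrative; GCPtrEffSrc is a hypothetical local):
 * with IEM_WITH_SETJMP the dword fetch shrinks to a single expression,
 *
 *     uint32_t const u32Value = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);
 *
 * since any #GP/#SS/#PF is delivered by longjmp from inside the helper.
 * Note that the IEM_WITH_DATA_TLB branch above is still a stub and
 * currently falls back to iemMemFetchDataU32SafeJmp.
 */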
9363
9364
9365#ifdef SOME_UNUSED_FUNCTION
9366/**
9367 * Fetches a data dword and sign extends it to a qword.
9368 *
9369 * @returns Strict VBox status code.
9370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9371 * @param pu64Dst Where to return the sign extended value.
9372 * @param iSegReg The index of the segment register to use for
9373 * this access. The base and limits are checked.
9374 * @param GCPtrMem The address of the guest memory.
9375 */
9376IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9377{
9378 /* The lazy approach for now... */
9379 int32_t const *pi32Src;
9380 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9381 if (rc == VINF_SUCCESS)
9382 {
9383 *pu64Dst = *pi32Src;
9384 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9385 }
9386#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9387 else
9388 *pu64Dst = 0;
9389#endif
9390 return rc;
9391}
9392#endif
9393
9394
9395/**
9396 * Fetches a data qword.
9397 *
9398 * @returns Strict VBox status code.
9399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9400 * @param pu64Dst Where to return the qword.
9401 * @param iSegReg The index of the segment register to use for
9402 * this access. The base and limits are checked.
9403 * @param GCPtrMem The address of the guest memory.
9404 */
9405IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9406{
9407 /* The lazy approach for now... */
9408 uint64_t const *pu64Src;
9409 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9410 if (rc == VINF_SUCCESS)
9411 {
9412 *pu64Dst = *pu64Src;
9413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9414 }
9415 return rc;
9416}
9417
9418
9419#ifdef IEM_WITH_SETJMP
9420/**
9421 * Fetches a data qword, longjmp on error.
9422 *
9423 * @returns The qword.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param iSegReg The index of the segment register to use for
9426 * this access. The base and limits are checked.
9427 * @param GCPtrMem The address of the guest memory.
9428 */
9429DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9430{
9431 /* The lazy approach for now... */
9432 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9433 uint64_t const u64Ret = *pu64Src;
9434 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9435 return u64Ret;
9436}
9437#endif
9438
9439
9440/**
9441 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9442 *
9443 * @returns Strict VBox status code.
9444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9445 * @param pu64Dst Where to return the qword.
9446 * @param iSegReg The index of the segment register to use for
9447 * this access. The base and limits are checked.
9448 * @param GCPtrMem The address of the guest memory.
9449 */
9450IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9451{
9452 /* The lazy approach for now... */
9453 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9454 if (RT_UNLIKELY(GCPtrMem & 15))
9455 return iemRaiseGeneralProtectionFault0(pVCpu);
9456
9457 uint64_t const *pu64Src;
9458 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9459 if (rc == VINF_SUCCESS)
9460 {
9461 *pu64Dst = *pu64Src;
9462 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9463 }
9464 return rc;
9465}
9466
9467
9468#ifdef IEM_WITH_SETJMP
9469/**
9470 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9471 *
9472 * @returns The qword.
9473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9474 * @param iSegReg The index of the segment register to use for
9475 * this access. The base and limits are checked.
9476 * @param GCPtrMem The address of the guest memory.
9477 */
9478DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9479{
9480 /* The lazy approach for now... */
9481 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9482 if (RT_LIKELY(!(GCPtrMem & 15)))
9483 {
9484 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9485 uint64_t const u64Ret = *pu64Src;
9486 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9487 return u64Ret;
9488 }
9489
9490 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9491 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9492}
9493#endif
9494
9495
9496/**
9497 * Fetches a data tword.
9498 *
9499 * @returns Strict VBox status code.
9500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9501 * @param pr80Dst Where to return the tword.
9502 * @param iSegReg The index of the segment register to use for
9503 * this access. The base and limits are checked.
9504 * @param GCPtrMem The address of the guest memory.
9505 */
9506IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9507{
9508 /* The lazy approach for now... */
9509 PCRTFLOAT80U pr80Src;
9510 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9511 if (rc == VINF_SUCCESS)
9512 {
9513 *pr80Dst = *pr80Src;
9514 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9515 }
9516 return rc;
9517}
9518
9519
9520#ifdef IEM_WITH_SETJMP
9521/**
9522 * Fetches a data tword, longjmp on error.
9523 *
9524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9525 * @param pr80Dst Where to return the tword.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 */
9530DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9531{
9532 /* The lazy approach for now... */
9533 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9534 *pr80Dst = *pr80Src;
9535 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9536}
9537#endif
9538
9539
9540/**
9541 * Fetches a data dqword (double qword), generally SSE related.
9542 *
9543 * @returns Strict VBox status code.
9544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9545 * @param pu128Dst Where to return the dqword.
9546 * @param iSegReg The index of the segment register to use for
9547 * this access. The base and limits are checked.
9548 * @param GCPtrMem The address of the guest memory.
9549 */
9550IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9551{
9552 /* The lazy approach for now... */
9553 PCRTUINT128U pu128Src;
9554 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9555 if (rc == VINF_SUCCESS)
9556 {
9557 pu128Dst->au64[0] = pu128Src->au64[0];
9558 pu128Dst->au64[1] = pu128Src->au64[1];
9559 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9560 }
9561 return rc;
9562}
9563
9564
9565#ifdef IEM_WITH_SETJMP
9566/**
9567 * Fetches a data dqword (double qword), generally SSE related.
9568 *
9569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9570 * @param pu128Dst Where to return the dqword.
9571 * @param iSegReg The index of the segment register to use for
9572 * this access. The base and limits are checked.
9573 * @param GCPtrMem The address of the guest memory.
9574 */
9575IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9576{
9577 /* The lazy approach for now... */
9578 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9579 pu128Dst->au64[0] = pu128Src->au64[0];
9580 pu128Dst->au64[1] = pu128Src->au64[1];
9581 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9582}
9583#endif
9584
9585
9586/**
9587 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9588 * related.
9589 *
9590 * Raises \#GP(0) if not aligned.
9591 *
9592 * @returns Strict VBox status code.
9593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9594 * @param pu128Dst Where to return the dqword.
9595 * @param iSegReg The index of the segment register to use for
9596 * this access. The base and limits are checked.
9597 * @param GCPtrMem The address of the guest memory.
9598 */
9599IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9600{
9601 /* The lazy approach for now... */
9602 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9603 if ( (GCPtrMem & 15)
9604 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9605 return iemRaiseGeneralProtectionFault0(pVCpu);
9606
9607 PCRTUINT128U pu128Src;
9608 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9609 if (rc == VINF_SUCCESS)
9610 {
9611 pu128Dst->au64[0] = pu128Src->au64[0];
9612 pu128Dst->au64[1] = pu128Src->au64[1];
9613 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9614 }
9615 return rc;
9616}
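
/*
 * Usage sketch (illustrative; GCPtrEffSrc is a hypothetical local) for an
 * aligned SSE load. The helper itself raises #GP(0) on a misaligned
 * address unless MXCSR.MM (misaligned SSE mode) is set:
 *
 *     RTUINT128U   uSrc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, X86_SREG_DS, GCPtrEffSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */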
9617
9618
9619#ifdef IEM_WITH_SETJMP
9620/**
9621 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9622 * related, longjmp on error.
9623 *
9624 * Raises \#GP(0) if not aligned.
9625 *
9626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9627 * @param pu128Dst Where to return the dqword.
9628 * @param iSegReg The index of the segment register to use for
9629 * this access. The base and limits are checked.
9630 * @param GCPtrMem The address of the guest memory.
9631 */
9632DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9633{
9634 /* The lazy approach for now... */
9635 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9636 if ( (GCPtrMem & 15) == 0
9637 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9638 {
9639 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9640 pu128Dst->au64[0] = pu128Src->au64[0];
9641 pu128Dst->au64[1] = pu128Src->au64[1];
9642 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9643 return;
9644 }
9645
9646 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9647 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9648}
9649#endif
9650
9651
9652/**
9653 * Fetches a data oword (octo word), generally AVX related.
9654 *
9655 * @returns Strict VBox status code.
9656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9657 * @param pu256Dst Where to return the oword.
9658 * @param iSegReg The index of the segment register to use for
9659 * this access. The base and limits are checked.
9660 * @param GCPtrMem The address of the guest memory.
9661 */
9662IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9663{
9664 /* The lazy approach for now... */
9665 PCRTUINT256U pu256Src;
9666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9667 if (rc == VINF_SUCCESS)
9668 {
9669 pu256Dst->au64[0] = pu256Src->au64[0];
9670 pu256Dst->au64[1] = pu256Src->au64[1];
9671 pu256Dst->au64[2] = pu256Src->au64[2];
9672 pu256Dst->au64[3] = pu256Src->au64[3];
9673 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9674 }
9675 return rc;
9676}
9677
9678
9679#ifdef IEM_WITH_SETJMP
9680/**
9681 * Fetches a data oword (octo word), generally AVX related.
9682 *
9683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9684 * @param pu256Dst Where to return the oword.
9685 * @param iSegReg The index of the segment register to use for
9686 * this access. The base and limits are checked.
9687 * @param GCPtrMem The address of the guest memory.
9688 */
9689IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9690{
9691 /* The lazy approach for now... */
9692 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9693 pu256Dst->au64[0] = pu256Src->au64[0];
9694 pu256Dst->au64[1] = pu256Src->au64[1];
9695 pu256Dst->au64[2] = pu256Src->au64[2];
9696 pu256Dst->au64[3] = pu256Src->au64[3];
9697 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9698}
9699#endif
9700
9701
9702/**
9703 * Fetches a data oword (octo word) at an aligned address, generally AVX
9704 * related.
9705 *
9706 * Raises \#GP(0) if not aligned.
9707 *
9708 * @returns Strict VBox status code.
9709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9710 * @param pu256Dst Where to return the oword.
9711 * @param iSegReg The index of the segment register to use for
9712 * this access. The base and limits are checked.
9713 * @param GCPtrMem The address of the guest memory.
9714 */
9715IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9716{
9717 /* The lazy approach for now... */
9718 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9719 if (GCPtrMem & 31)
9720 return iemRaiseGeneralProtectionFault0(pVCpu);
9721
9722 PCRTUINT256U pu256Src;
9723 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9724 if (rc == VINF_SUCCESS)
9725 {
9726 pu256Dst->au64[0] = pu256Src->au64[0];
9727 pu256Dst->au64[1] = pu256Src->au64[1];
9728 pu256Dst->au64[2] = pu256Src->au64[2];
9729 pu256Dst->au64[3] = pu256Src->au64[3];
9730 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9731 }
9732 return rc;
9733}
9734
9735
9736#ifdef IEM_WITH_SETJMP
9737/**
9738 * Fetches a data oword (octo word) at an aligned address, generally AVX
9739 * related, longjmp on error.
9740 *
9741 * Raises \#GP(0) if not aligned.
9742 *
9743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9744 * @param pu256Dst Where to return the oword.
9745 * @param iSegReg The index of the segment register to use for
9746 * this access. The base and limits are checked.
9747 * @param GCPtrMem The address of the guest memory.
9748 */
9749DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9750{
9751 /* The lazy approach for now... */
9752 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9753 if ((GCPtrMem & 31) == 0)
9754 {
9755 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9756 pu256Dst->au64[0] = pu256Src->au64[0];
9757 pu256Dst->au64[1] = pu256Src->au64[1];
9758 pu256Dst->au64[2] = pu256Src->au64[2];
9759 pu256Dst->au64[3] = pu256Src->au64[3];
9760 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9761 return;
9762 }
9763
9764 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9765 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9766}
9767#endif
9768
9769
9770
9771/**
9772 * Fetches a descriptor register (lgdt, lidt).
9773 *
9774 * @returns Strict VBox status code.
9775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9776 * @param pcbLimit Where to return the limit.
9777 * @param pGCPtrBase Where to return the base.
9778 * @param iSegReg The index of the segment register to use for
9779 * this access. The base and limits are checked.
9780 * @param GCPtrMem The address of the guest memory.
9781 * @param enmOpSize The effective operand size.
9782 */
9783IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9784 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9785{
9786 /*
9787 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9788 * little special:
9789 * - The two reads are done separately.
9790 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9791 * - We suspect the 386 to actually commit the limit before the base in
9792 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9793 * don't try to emulate this eccentric behavior, because it's not well
9794 * enough understood and rather hard to trigger.
9795 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9796 */
9797 VBOXSTRICTRC rcStrict;
9798 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9799 {
9800 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9801 if (rcStrict == VINF_SUCCESS)
9802 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9803 }
9804 else
9805 {
9806 uint32_t uTmp = 0; /* (otherwise Visual C++ may warn that it could be used uninitialized) */
9807 if (enmOpSize == IEMMODE_32BIT)
9808 {
9809 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9810 {
9811 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9812 if (rcStrict == VINF_SUCCESS)
9813 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9814 }
9815 else
9816 {
9817 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9818 if (rcStrict == VINF_SUCCESS)
9819 {
9820 *pcbLimit = (uint16_t)uTmp;
9821 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9822 }
9823 }
9824 if (rcStrict == VINF_SUCCESS)
9825 *pGCPtrBase = uTmp;
9826 }
9827 else
9828 {
9829 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9830 if (rcStrict == VINF_SUCCESS)
9831 {
9832 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9833 if (rcStrict == VINF_SUCCESS)
9834 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9835 }
9836 }
9837 }
9838 return rcStrict;
9839}
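
/*
 * Memory layout recap for the fetches above: the LGDT/LIDT operand is a
 * pseudo descriptor with the 16-bit limit at offset 0 and the base at
 * offset 2, hence the two separate reads:
 *
 *     offset 0: limit (word)
 *     offset 2: base  (dword in 16/32-bit code, qword in 64-bit code)
 *
 * With a 16-bit operand size the upper byte of the base dword is masked
 * off (base & 0x00ffffff), matching the 24-bit base of that form.
 */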
9840
9841
9842
9843/**
9844 * Stores a data byte.
9845 *
9846 * @returns Strict VBox status code.
9847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9848 * @param iSegReg The index of the segment register to use for
9849 * this access. The base and limits are checked.
9850 * @param GCPtrMem The address of the guest memory.
9851 * @param u8Value The value to store.
9852 */
9853IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9854{
9855 /* The lazy approach for now... */
9856 uint8_t *pu8Dst;
9857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9858 if (rc == VINF_SUCCESS)
9859 {
9860 *pu8Dst = u8Value;
9861 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9862 }
9863 return rc;
9864}
9865
9866
9867#ifdef IEM_WITH_SETJMP
9868/**
9869 * Stores a data byte, longjmp on error.
9870 *
9871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9872 * @param iSegReg The index of the segment register to use for
9873 * this access. The base and limits are checked.
9874 * @param GCPtrMem The address of the guest memory.
9875 * @param u8Value The value to store.
9876 */
9877IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9878{
9879 /* The lazy approach for now... */
9880 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9881 *pu8Dst = u8Value;
9882 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9883}
9884#endif
9885
9886
9887/**
9888 * Stores a data word.
9889 *
9890 * @returns Strict VBox status code.
9891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9892 * @param iSegReg The index of the segment register to use for
9893 * this access. The base and limits are checked.
9894 * @param GCPtrMem The address of the guest memory.
9895 * @param u16Value The value to store.
9896 */
9897IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9898{
9899 /* The lazy approach for now... */
9900 uint16_t *pu16Dst;
9901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9902 if (rc == VINF_SUCCESS)
9903 {
9904 *pu16Dst = u16Value;
9905 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9906 }
9907 return rc;
9908}
9909
9910
9911#ifdef IEM_WITH_SETJMP
9912/**
9913 * Stores a data word, longjmp on error.
9914 *
9915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9916 * @param iSegReg The index of the segment register to use for
9917 * this access. The base and limits are checked.
9918 * @param GCPtrMem The address of the guest memory.
9919 * @param u16Value The value to store.
9920 */
9921IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9922{
9923 /* The lazy approach for now... */
9924 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9925 *pu16Dst = u16Value;
9926 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9927}
9928#endif
9929
9930
9931/**
9932 * Stores a data dword.
9933 *
9934 * @returns Strict VBox status code.
9935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9936 * @param iSegReg The index of the segment register to use for
9937 * this access. The base and limits are checked.
9938 * @param GCPtrMem The address of the guest memory.
9939 * @param u32Value The value to store.
9940 */
9941IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9942{
9943 /* The lazy approach for now... */
9944 uint32_t *pu32Dst;
9945 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9946 if (rc == VINF_SUCCESS)
9947 {
9948 *pu32Dst = u32Value;
9949 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9950 }
9951 return rc;
9952}
9953
9954
9955#ifdef IEM_WITH_SETJMP
9956/**
9957 * Stores a data dword, longjmp on error.
9958 *
9959 * @returns Strict VBox status code.
9960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9961 * @param iSegReg The index of the segment register to use for
9962 * this access. The base and limits are checked.
9963 * @param GCPtrMem The address of the guest memory.
9964 * @param u32Value The value to store.
9965 */
9966IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9967{
9968 /* The lazy approach for now... */
9969 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9970 *pu32Dst = u32Value;
9971 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9972}
9973#endif
9974
9975
9976/**
9977 * Stores a data qword.
9978 *
9979 * @returns Strict VBox status code.
9980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9981 * @param iSegReg The index of the segment register to use for
9982 * this access. The base and limits are checked.
9983 * @param GCPtrMem The address of the guest memory.
9984 * @param u64Value The value to store.
9985 */
9986IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9987{
9988 /* The lazy approach for now... */
9989 uint64_t *pu64Dst;
9990 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9991 if (rc == VINF_SUCCESS)
9992 {
9993 *pu64Dst = u64Value;
9994 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9995 }
9996 return rc;
9997}
9998
9999
10000#ifdef IEM_WITH_SETJMP
10001/**
10002 * Stores a data qword, longjmp on error.
10003 *
10004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10005 * @param iSegReg The index of the segment register to use for
10006 * this access. The base and limits are checked.
10007 * @param GCPtrMem The address of the guest memory.
10008 * @param u64Value The value to store.
10009 */
10010IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10011{
10012 /* The lazy approach for now... */
10013 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10014 *pu64Dst = u64Value;
10015 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10016}
10017#endif
10018
10019
10020/**
10021 * Stores a data dqword.
10022 *
10023 * @returns Strict VBox status code.
10024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10025 * @param iSegReg The index of the segment register to use for
10026 * this access. The base and limits are checked.
10027 * @param GCPtrMem The address of the guest memory.
10028 * @param u128Value The value to store.
10029 */
10030IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10031{
10032 /* The lazy approach for now... */
10033 PRTUINT128U pu128Dst;
10034 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10035 if (rc == VINF_SUCCESS)
10036 {
10037 pu128Dst->au64[0] = u128Value.au64[0];
10038 pu128Dst->au64[1] = u128Value.au64[1];
10039 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10040 }
10041 return rc;
10042}
10043
10044
10045#ifdef IEM_WITH_SETJMP
10046/**
10047 * Stores a data dqword, longjmp on error.
10048 *
10049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10050 * @param iSegReg The index of the segment register to use for
10051 * this access. The base and limits are checked.
10052 * @param GCPtrMem The address of the guest memory.
10053 * @param u128Value The value to store.
10054 */
10055IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10056{
10057 /* The lazy approach for now... */
10058 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10059 pu128Dst->au64[0] = u128Value.au64[0];
10060 pu128Dst->au64[1] = u128Value.au64[1];
10061 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10062}
10063#endif
10064
10065
10066/**
10067 * Stores a data dqword, SSE aligned.
10068 *
10069 * @returns Strict VBox status code.
10070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10071 * @param iSegReg The index of the segment register to use for
10072 * this access. The base and limits are checked.
10073 * @param GCPtrMem The address of the guest memory.
10074 * @param u128Value The value to store.
10075 */
10076IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10077{
10078 /* The lazy approach for now... */
10079 if ( (GCPtrMem & 15)
10080 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10081 return iemRaiseGeneralProtectionFault0(pVCpu);
10082
10083 PRTUINT128U pu128Dst;
10084 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10085 if (rc == VINF_SUCCESS)
10086 {
10087 pu128Dst->au64[0] = u128Value.au64[0];
10088 pu128Dst->au64[1] = u128Value.au64[1];
10089 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10090 }
10091 return rc;
10092}
10093
10094
10095#ifdef IEM_WITH_SETJMP
10096/**
10097 * Stores a data dqword, SSE aligned, longjmp on error.
10098 *
10099 * @returns Strict VBox status code.
10100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10101 * @param iSegReg The index of the segment register to use for
10102 * this access. The base and limits are checked.
10103 * @param GCPtrMem The address of the guest memory.
10104 * @param u128Value The value to store.
10105 */
10106DECL_NO_INLINE(IEM_STATIC, void)
10107iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10108{
10109 /* The lazy approach for now... */
10110 if ( (GCPtrMem & 15) == 0
10111 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10112 {
10113 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10114 pu128Dst->au64[0] = u128Value.au64[0];
10115 pu128Dst->au64[1] = u128Value.au64[1];
10116 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10117 return;
10118 }
10119
10120 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10121 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10122}
10123#endif
10124
10125
10126/**
10127 * Stores a data oword (octo word).
10128 *
10129 * @returns Strict VBox status code.
10130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10131 * @param iSegReg The index of the segment register to use for
10132 * this access. The base and limits are checked.
10133 * @param GCPtrMem The address of the guest memory.
10134 * @param pu256Value Pointer to the value to store.
10135 */
10136IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10137{
10138 /* The lazy approach for now... */
10139 PRTUINT256U pu256Dst;
10140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10141 if (rc == VINF_SUCCESS)
10142 {
10143 pu256Dst->au64[0] = pu256Value->au64[0];
10144 pu256Dst->au64[1] = pu256Value->au64[1];
10145 pu256Dst->au64[2] = pu256Value->au64[2];
10146 pu256Dst->au64[3] = pu256Value->au64[3];
10147 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10148 }
10149 return rc;
10150}
10151
10152
10153#ifdef IEM_WITH_SETJMP
10154/**
10155 * Stores a data oword (octo word), longjmp on error.
10156 *
10157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10158 * @param iSegReg The index of the segment register to use for
10159 * this access. The base and limits are checked.
10160 * @param GCPtrMem The address of the guest memory.
10161 * @param pu256Value Pointer to the value to store.
10162 */
10163IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10164{
10165 /* The lazy approach for now... */
10166 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10167 pu256Dst->au64[0] = pu256Value->au64[0];
10168 pu256Dst->au64[1] = pu256Value->au64[1];
10169 pu256Dst->au64[2] = pu256Value->au64[2];
10170 pu256Dst->au64[3] = pu256Value->au64[3];
10171 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10172}
10173#endif
10174
10175
10176/**
10177 * Stores a data oword (octo word), AVX aligned.
10178 *
10179 * @returns Strict VBox status code.
10180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10181 * @param iSegReg The index of the segment register to use for
10182 * this access. The base and limits are checked.
10183 * @param GCPtrMem The address of the guest memory.
10184 * @param pu256Value Pointer to the value to store.
10185 */
10186IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10187{
10188 /* The lazy approach for now... */
10189 if (GCPtrMem & 31)
10190 return iemRaiseGeneralProtectionFault0(pVCpu);
10191
10192 PRTUINT256U pu256Dst;
10193 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10194 if (rc == VINF_SUCCESS)
10195 {
10196 pu256Dst->au64[0] = pu256Value->au64[0];
10197 pu256Dst->au64[1] = pu256Value->au64[1];
10198 pu256Dst->au64[2] = pu256Value->au64[2];
10199 pu256Dst->au64[3] = pu256Value->au64[3];
10200 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10201 }
10202 return rc;
10203}
10204
10205
10206#ifdef IEM_WITH_SETJMP
10207/**
10208 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param iSegReg The index of the segment register to use for
10213 * this access. The base and limits are checked.
10214 * @param GCPtrMem The address of the guest memory.
10215 * @param pu256Value Pointer to the value to store.
10216 */
10217DECL_NO_INLINE(IEM_STATIC, void)
10218iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10219{
10220 /* The lazy approach for now... */
10221 if ((GCPtrMem & 31) == 0)
10222 {
10223 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10224 pu256Dst->au64[0] = pu256Value->au64[0];
10225 pu256Dst->au64[1] = pu256Value->au64[1];
10226 pu256Dst->au64[2] = pu256Value->au64[2];
10227 pu256Dst->au64[3] = pu256Value->au64[3];
10228 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10229 return;
10230 }
10231
10232 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10233 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10234}
10235#endif
10236
10237
10238/**
10239 * Stores a descriptor register (sgdt, sidt).
10240 *
10241 * @returns Strict VBox status code.
10242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10243 * @param cbLimit The limit.
10244 * @param GCPtrBase The base address.
10245 * @param iSegReg The index of the segment register to use for
10246 * this access. The base and limits are checked.
10247 * @param GCPtrMem The address of the guest memory.
10248 */
10249IEM_STATIC VBOXSTRICTRC
10250iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10251{
10252 /*
10253 * The SIDT and SGDT instructions actually store the data using two
10254 * independent writes. The instructions do not respond to operand size prefixes.
10255 */
10256 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10257 if (rcStrict == VINF_SUCCESS)
10258 {
10259 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10260 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10261 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10262 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10263 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10264 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10265 else
10266 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10267 }
10268 return rcStrict;
10269}
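
/*
 * Worked example for the 16-bit operand size path above: SGDT on a 286
 * class target forces the top byte of the stored base to 0xff, so a base
 * of 0x00123456 with limit 0x03ff ends up in memory as:
 *
 *     offset 0: 0x03ff      (limit word)
 *     offset 2: 0xff123456  (base | 0xff000000 on <= 286 targets,
 *                            plain 0x00123456 on later CPUs)
 */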
10270
10271
10272/**
10273 * Pushes a word onto the stack.
10274 *
10275 * @returns Strict VBox status code.
10276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10277 * @param u16Value The value to push.
10278 */
10279IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10280{
10281 /* Decrement the stack pointer. */
10282 uint64_t uNewRsp;
10283 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10284
10285 /* Write the word the lazy way. */
10286 uint16_t *pu16Dst;
10287 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10288 if (rc == VINF_SUCCESS)
10289 {
10290 *pu16Dst = u16Value;
10291 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10292 }
10293
10294 /* Commit the new RSP value unless an access handler made trouble. */
10295 if (rc == VINF_SUCCESS)
10296 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10297
10298 return rc;
10299}
10300
10301
10302/**
10303 * Pushes a dword onto the stack.
10304 *
10305 * @returns Strict VBox status code.
10306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10307 * @param u32Value The value to push.
10308 */
10309IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10310{
10311 /* Decrement the stack pointer. */
10312 uint64_t uNewRsp;
10313 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10314
10315 /* Write the dword the lazy way. */
10316 uint32_t *pu32Dst;
10317 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10318 if (rc == VINF_SUCCESS)
10319 {
10320 *pu32Dst = u32Value;
10321 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10322 }
10323
10324 /* Commit the new RSP value unless an access handler made trouble. */
10325 if (rc == VINF_SUCCESS)
10326 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10327
10328 return rc;
10329}
10330
10331
10332/**
10333 * Pushes a dword segment register value onto the stack.
10334 *
10335 * @returns Strict VBox status code.
10336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10337 * @param u32Value The value to push.
10338 */
10339IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10340{
10341 /* Decrement the stack pointer. */
10342 uint64_t uNewRsp;
10343 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10344
10345 /* The Intel docs talk about zero extending the selector register
10346 value. My actual Intel CPU here might be zero extending the value,
10347 but it still only writes the lower word... */
10348 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10349 * happens when crossing an electric page boundary, is the high word checked
10350 * for write accessibility or not? Probably it is. What about segment limits?
10351 * It appears this behavior is also shared with trap error codes.
10352 *
10353 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10354 * ancient hardware when it actually did change. */
10355 uint16_t *pu16Dst;
10356 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10357 if (rc == VINF_SUCCESS)
10358 {
10359 *pu16Dst = (uint16_t)u32Value;
10360 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10361 }
10362
10363 /* Commit the new RSP value unless an access handler made trouble. */
10364 if (rc == VINF_SUCCESS)
10365 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10366
10367 return rc;
10368}
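
/*
 * Observable effect of the 16-bit store above, sketched for a 32-bit
 * "push fs" (values are illustrative):
 *
 *     before: ESP = 0x1000, FS = 0x0028, dword at 0x0ffc = 0xdeadbeef
 *     after:  ESP = 0x0ffc,              dword at 0x0ffc = 0xdead0028
 *
 * ESP moves by 4, but only the low word of the slot is written; the
 * read-write mapping of sizeof(uint32_t) keeps the upper half intact.
 */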
10369
10370
10371/**
10372 * Pushes a qword onto the stack.
10373 *
10374 * @returns Strict VBox status code.
10375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10376 * @param u64Value The value to push.
10377 */
10378IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10379{
10380 /* Decrement the stack pointer. */
10381 uint64_t uNewRsp;
10382 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10383
10384 /* Write the qword the lazy way. */
10385 uint64_t *pu64Dst;
10386 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10387 if (rc == VINF_SUCCESS)
10388 {
10389 *pu64Dst = u64Value;
10390 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10391 }
10392
10393 /* Commit the new RSP value unless an access handler made trouble. */
10394 if (rc == VINF_SUCCESS)
10395 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10396
10397 return rc;
10398}
10399
10400
10401/**
10402 * Pops a word from the stack.
10403 *
10404 * @returns Strict VBox status code.
10405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10406 * @param pu16Value Where to store the popped value.
10407 */
10408IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10409{
10410 /* Increment the stack pointer. */
10411 uint64_t uNewRsp;
10412 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10413
10414 /* Fetch the word the lazy way. */
10415 uint16_t const *pu16Src;
10416 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10417 if (rc == VINF_SUCCESS)
10418 {
10419 *pu16Value = *pu16Src;
10420 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10421
10422 /* Commit the new RSP value. */
10423 if (rc == VINF_SUCCESS)
10424 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10425 }
10426
10427 return rc;
10428}
10429
10430
10431/**
10432 * Pops a dword from the stack.
10433 *
10434 * @returns Strict VBox status code.
10435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10436 * @param pu32Value Where to store the popped value.
10437 */
10438IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10439{
10440 /* Increment the stack pointer. */
10441 uint64_t uNewRsp;
10442 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10443
10444 /* Fetch the dword the lazy way. */
10445 uint32_t const *pu32Src;
10446 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10447 if (rc == VINF_SUCCESS)
10448 {
10449 *pu32Value = *pu32Src;
10450 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10451
10452 /* Commit the new RSP value. */
10453 if (rc == VINF_SUCCESS)
10454 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10455 }
10456
10457 return rc;
10458}
10459
10460
10461/**
10462 * Pops a qword from the stack.
10463 *
10464 * @returns Strict VBox status code.
10465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10466 * @param pu64Value Where to store the popped value.
10467 */
10468IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10469{
10470 /* Increment the stack pointer. */
10471 uint64_t uNewRsp;
10472 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10473
10474 /* Fetch the qword the lazy way. */
10475 uint64_t const *pu64Src;
10476 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10477 if (rc == VINF_SUCCESS)
10478 {
10479 *pu64Value = *pu64Src;
10480 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10481
10482 /* Commit the new RSP value. */
10483 if (rc == VINF_SUCCESS)
10484 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10485 }
10486
10487 return rc;
10488}
10489
10490
10491/**
10492 * Pushes a word onto the stack, using a temporary stack pointer.
10493 *
10494 * @returns Strict VBox status code.
10495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10496 * @param u16Value The value to push.
10497 * @param pTmpRsp Pointer to the temporary stack pointer.
10498 */
10499IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10500{
10501 /* Decrement the stack pointer. */
10502 RTUINT64U NewRsp = *pTmpRsp;
10503 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10504
10505 /* Write the word the lazy way. */
10506 uint16_t *pu16Dst;
10507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10508 if (rc == VINF_SUCCESS)
10509 {
10510 *pu16Dst = u16Value;
10511 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10512 }
10513
10514 /* Commit the new RSP value unless an access handler made trouble. */
10515 if (rc == VINF_SUCCESS)
10516 *pTmpRsp = NewRsp;
10517
10518 return rc;
10519}
10520
10521
10522/**
10523 * Pushes a dword onto the stack, using a temporary stack pointer.
10524 *
10525 * @returns Strict VBox status code.
10526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10527 * @param u32Value The value to push.
10528 * @param pTmpRsp Pointer to the temporary stack pointer.
10529 */
10530IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10531{
10532 /* Decrement the stack pointer. */
10533 RTUINT64U NewRsp = *pTmpRsp;
10534 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10535
10536 /* Write the dword the lazy way. */
10537 uint32_t *pu32Dst;
10538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10539 if (rc == VINF_SUCCESS)
10540 {
10541 *pu32Dst = u32Value;
10542 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10543 }
10544
10545 /* Commit the new RSP value unless an access handler made trouble. */
10546 if (rc == VINF_SUCCESS)
10547 *pTmpRsp = NewRsp;
10548
10549 return rc;
10550}
10551
10552
10553/**
10554 * Pushes a dword onto the stack, using a temporary stack pointer.
10555 *
10556 * @returns Strict VBox status code.
10557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10558 * @param u64Value The value to push.
10559 * @param pTmpRsp Pointer to the temporary stack pointer.
10560 */
10561IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10562{
10563 /* Decrement the stack pointer. */
10564 RTUINT64U NewRsp = *pTmpRsp;
10565 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10566
10567 /* Write the qword the lazy way. */
10568 uint64_t *pu64Dst;
10569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10570 if (rc == VINF_SUCCESS)
10571 {
10572 *pu64Dst = u64Value;
10573 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10574 }
10575
10576 /* Commit the new RSP value unless an access handler made trouble. */
10577 if (rc == VINF_SUCCESS)
10578 *pTmpRsp = NewRsp;
10579
10580 return rc;
10581}
10582
10583
10584/**
10585 * Pops a word from the stack, using a temporary stack pointer.
10586 *
10587 * @returns Strict VBox status code.
10588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10589 * @param pu16Value Where to store the popped value.
10590 * @param pTmpRsp Pointer to the temporary stack pointer.
10591 */
10592IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10593{
10594 /* Increment the stack pointer. */
10595 RTUINT64U NewRsp = *pTmpRsp;
10596 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10597
10598 /* Fetch the word the lazy way. */
10599 uint16_t const *pu16Src;
10600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10601 if (rc == VINF_SUCCESS)
10602 {
10603 *pu16Value = *pu16Src;
10604 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10605
10606 /* Commit the new RSP value. */
10607 if (rc == VINF_SUCCESS)
10608 *pTmpRsp = NewRsp;
10609 }
10610
10611 return rc;
10612}
10613
10614
10615/**
10616 * Pops a dword from the stack, using a temporary stack pointer.
10617 *
10618 * @returns Strict VBox status code.
10619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10620 * @param pu32Value Where to store the popped value.
10621 * @param pTmpRsp Pointer to the temporary stack pointer.
10622 */
10623IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10624{
10625 /* Increment the stack pointer. */
10626 RTUINT64U NewRsp = *pTmpRsp;
10627 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10628
10629 /* Fetch the dword the lazy way. */
10630 uint32_t const *pu32Src;
10631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10632 if (rc == VINF_SUCCESS)
10633 {
10634 *pu32Value = *pu32Src;
10635 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10636
10637 /* Commit the new RSP value. */
10638 if (rc == VINF_SUCCESS)
10639 *pTmpRsp = NewRsp;
10640 }
10641
10642 return rc;
10643}
10644
10645
10646/**
10647 * Pops a qword from the stack, using a temporary stack pointer.
10648 *
10649 * @returns Strict VBox status code.
10650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10651 * @param pu64Value Where to store the popped value.
10652 * @param pTmpRsp Pointer to the temporary stack pointer.
10653 */
10654IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10655{
10656 /* Increment the stack pointer. */
10657 RTUINT64U NewRsp = *pTmpRsp;
10658 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10659
10660 /* Fetch the qword the lazy way. */
10661 uint64_t const *pu64Src;
10662 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10663 if (rcStrict == VINF_SUCCESS)
10664 {
10665 *pu64Value = *pu64Src;
10666 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10667
10668 /* Commit the new RSP value. */
10669 if (rcStrict == VINF_SUCCESS)
10670 *pTmpRsp = NewRsp;
10671 }
10672
10673 return rcStrict;
10674}
10675
10676
10677/**
10678 * Begin a special stack push (used by interrupts, exceptions and such).
10679 *
10680 * This will raise \#SS or \#PF if appropriate.
10681 *
10682 * @returns Strict VBox status code.
10683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10684 * @param cbMem The number of bytes to push onto the stack.
10685 * @param ppvMem Where to return the pointer to the stack memory.
10686 * As with the other memory functions this could be
10687 * direct access or bounce buffered access, so
10688 * don't commit register changes until the commit call
10689 * succeeds.
10690 * @param puNewRsp Where to return the new RSP value. This must be
10691 * passed unchanged to
10692 * iemMemStackPushCommitSpecial().
10693 */
10694IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10695{
10696 Assert(cbMem < UINT8_MAX);
10697 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10698 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10699}
10700
10701
10702/**
10703 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10704 *
10705 * This will update the rSP.
10706 *
10707 * @returns Strict VBox status code.
10708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10709 * @param pvMem The pointer returned by
10710 * iemMemStackPushBeginSpecial().
10711 * @param uNewRsp The new RSP value returned by
10712 * iemMemStackPushBeginSpecial().
10713 */
10714IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10715{
10716 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10717 if (rcStrict == VINF_SUCCESS)
10718 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10719 return rcStrict;
10720}
10721
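/*
 * A minimal usage sketch (illustrative only; uValueToPush is a hypothetical
 * local): the special push is a begin/commit pair so the pushed data becomes
 * visible and RSP is updated only if the whole operation succeeds.
 *
 *   uint64_t uNewRsp;
 *   uint64_t *pu64Frame;
 *   VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pu64Frame, &uNewRsp);
 *   if (rcStrict != VINF_SUCCESS)
 *       return rcStrict;
 *   *pu64Frame = uValueToPush; // fill in the mapped (possibly bounce buffered) stack memory
 *   return iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp);
 */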
10722
10723/**
10724 * Begin a special stack pop (used by iret, retf and such).
10725 *
10726 * This will raise \#SS or \#PF if appropriate.
10727 *
10728 * @returns Strict VBox status code.
10729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10730 * @param cbMem The number of bytes to pop from the stack.
10731 * @param ppvMem Where to return the pointer to the stack memory.
10732 * @param puNewRsp Where to return the new RSP value. This must be
10733 * assigned to CPUMCTX::rsp manually some time
10734 * after iemMemStackPopDoneSpecial() has been
10735 * called.
10736 */
10737IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10738{
10739 Assert(cbMem < UINT8_MAX);
10740 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10741 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10742}
10743
10744
10745/**
10746 * Continue a special stack pop (used by iret and retf).
10747 *
10748 * This will raise \#SS or \#PF if appropriate.
10749 *
10750 * @returns Strict VBox status code.
10751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10752 * @param cbMem The number of bytes to pop from the stack.
10753 * @param ppvMem Where to return the pointer to the stack memory.
10754 * @param puNewRsp Where to return the new RSP value. This must be
10755 * assigned to CPUMCTX::rsp manually some time
10756 * after iemMemStackPopDoneSpecial() has been
10757 * called.
10758 */
10759IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10760{
10761 Assert(cbMem < UINT8_MAX);
10762 RTUINT64U NewRsp;
10763 NewRsp.u = *puNewRsp;
10764 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10765 *puNewRsp = NewRsp.u;
10766 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10767}
10768
10769
10770/**
10771 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10772 * iemMemStackPopContinueSpecial).
10773 *
10774 * The caller will manually commit the rSP.
10775 *
10776 * @returns Strict VBox status code.
10777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10778 * @param pvMem The pointer returned by
10779 * iemMemStackPopBeginSpecial() or
10780 * iemMemStackPopContinueSpecial().
10781 */
10782IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10783{
10784 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10785}
10786
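/*
 * A minimal usage sketch (illustrative only): the special pop maps the stack
 * memory, lets the caller read it, and leaves the RSP commit to the caller so
 * it only happens once the whole operation has succeeded.
 *
 *   uint64_t uNewRsp;
 *   uint64_t const *pu64Frame;
 *   VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
 *   if (rcStrict != VINF_SUCCESS)
 *       return rcStrict;
 *   uint64_t const uValue = *pu64Frame;
 *   rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *   if (rcStrict == VINF_SUCCESS)
 *       pVCpu->cpum.GstCtx.rsp = uNewRsp; // the caller commits RSP manually
 *   return rcStrict;
 */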
10787
10788/**
10789 * Fetches a system table byte.
10790 *
10791 * @returns Strict VBox status code.
10792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10793 * @param pbDst Where to return the byte.
10794 * @param iSegReg The index of the segment register to use for
10795 * this access. The base and limits are checked.
10796 * @param GCPtrMem The address of the guest memory.
10797 */
10798IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10799{
10800 /* The lazy approach for now... */
10801 uint8_t const *pbSrc;
10802 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10803 if (rc == VINF_SUCCESS)
10804 {
10805 *pbDst = *pbSrc;
10806 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10807 }
10808 return rc;
10809}
10810
10811
10812/**
10813 * Fetches a system table word.
10814 *
10815 * @returns Strict VBox status code.
10816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10817 * @param pu16Dst Where to return the word.
10818 * @param iSegReg The index of the segment register to use for
10819 * this access. The base and limits are checked.
10820 * @param GCPtrMem The address of the guest memory.
10821 */
10822IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10823{
10824 /* The lazy approach for now... */
10825 uint16_t const *pu16Src;
10826 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10827 if (rc == VINF_SUCCESS)
10828 {
10829 *pu16Dst = *pu16Src;
10830 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10831 }
10832 return rc;
10833}
10834
10835
10836/**
10837 * Fetches a system table dword.
10838 *
10839 * @returns Strict VBox status code.
10840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10841 * @param pu32Dst Where to return the dword.
10842 * @param iSegReg The index of the segment register to use for
10843 * this access. The base and limits are checked.
10844 * @param GCPtrMem The address of the guest memory.
10845 */
10846IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10847{
10848 /* The lazy approach for now... */
10849 uint32_t const *pu32Src;
10850 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10851 if (rc == VINF_SUCCESS)
10852 {
10853 *pu32Dst = *pu32Src;
10854 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10855 }
10856 return rc;
10857}
10858
10859
10860/**
10861 * Fetches a system table qword.
10862 *
10863 * @returns Strict VBox status code.
10864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10865 * @param pu64Dst Where to return the qword.
10866 * @param iSegReg The index of the segment register to use for
10867 * this access. The base and limits are checked.
10868 * @param GCPtrMem The address of the guest memory.
10869 */
10870IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10871{
10872 /* The lazy approach for now... */
10873 uint64_t const *pu64Src;
10874 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10875 if (rc == VINF_SUCCESS)
10876 {
10877 *pu64Dst = *pu64Src;
10878 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10879 }
10880 return rc;
10881}
10882
10883
10884/**
10885 * Fetches a descriptor table entry with a caller-specified error code.
10886 *
10887 * @returns Strict VBox status code.
10888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10889 * @param pDesc Where to return the descriptor table entry.
10890 * @param uSel The selector whose table entry to fetch.
10891 * @param uXcpt The exception to raise on table lookup error.
10892 * @param uErrorCode The error code associated with the exception.
10893 */
10894IEM_STATIC VBOXSTRICTRC
10895iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10896{
10897 AssertPtr(pDesc);
10898 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10899
10900 /** @todo did the 286 require all 8 bytes to be accessible? */
10901 /*
10902 * Get the selector table base and check bounds.
10903 */
10904 RTGCPTR GCPtrBase;
10905 if (uSel & X86_SEL_LDT)
10906 {
10907 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10908 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10909 {
10910 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10911 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10912 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10913 uErrorCode, 0);
10914 }
10915
10916 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10917 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10918 }
10919 else
10920 {
10921 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10922 {
10923 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10924 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10925 uErrorCode, 0);
10926 }
10927 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10928 }
10929
10930 /*
10931 * Read the legacy descriptor and maybe the long mode extensions if
10932 * required.
10933 */
10934 VBOXSTRICTRC rcStrict;
10935 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10936 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10937 else
10938 {
10939 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10940 if (rcStrict == VINF_SUCCESS)
10941 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10942 if (rcStrict == VINF_SUCCESS)
10943 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10944 if (rcStrict == VINF_SUCCESS)
10945 pDesc->Legacy.au16[3] = 0;
10946 else
10947 return rcStrict;
10948 }
10949
10950 if (rcStrict == VINF_SUCCESS)
10951 {
10952 if ( !IEM_IS_LONG_MODE(pVCpu)
10953 || pDesc->Legacy.Gen.u1DescType)
10954 pDesc->Long.au64[1] = 0;
10955 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10956 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10957 else
10958 {
10959 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10960 /** @todo is this the right exception? */
10961 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10962 }
10963 }
10964 return rcStrict;
10965}
10966
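/*
 * Worked example (illustrative only) of the selector decomposition used
 * above: selector 0x002b has RPL=3 (bits 0-1), TI=0 (bit 2, i.e. GDT) and
 * index 5, so the descriptor is read at gdtr.pGdt + (0x2b & X86_SEL_MASK)
 * = gdtr.pGdt + 0x28. With TI=1 the same offset would be applied to
 * ldtr.u64Base instead.
 */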
10967
10968/**
10969 * Fetches a descriptor table entry.
10970 *
10971 * @returns Strict VBox status code.
10972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10973 * @param pDesc Where to return the descriptor table entry.
10974 * @param uSel The selector whose table entry to fetch.
10975 * @param uXcpt The exception to raise on table lookup error.
10976 */
10977IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10978{
10979 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10980}
10981
10982
10983/**
10984 * Fakes a long mode stack segment descriptor for SS = 0.
10985 *
10986 * @param pDescSs Where to return the fake stack descriptor.
10987 * @param uDpl The DPL we want.
10988 */
10989IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10990{
10991 pDescSs->Long.au64[0] = 0;
10992 pDescSs->Long.au64[1] = 0;
10993 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10994 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10995 pDescSs->Long.Gen.u2Dpl = uDpl;
10996 pDescSs->Long.Gen.u1Present = 1;
10997 pDescSs->Long.Gen.u1Long = 1;
10998}
10999
11000
11001/**
11002 * Marks the selector descriptor as accessed (only non-system descriptors).
11003 *
11004 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11005 * will therefore skip the limit checks.
11006 *
11007 * @returns Strict VBox status code.
11008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11009 * @param uSel The selector.
11010 */
11011IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11012{
11013 /*
11014 * Get the selector table base and calculate the entry address.
11015 */
11016 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11017 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11018 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11019 GCPtr += uSel & X86_SEL_MASK;
11020
11021 /*
11022 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11023 * ugly stuff to avoid this. This also makes sure the access is atomic
11024 * and more or less removes any question about 8-bit vs 32-bit accesses.
11025 */
11026 VBOXSTRICTRC rcStrict;
11027 uint32_t volatile *pu32;
11028 if ((GCPtr & 3) == 0)
11029 {
11030 /* The normal case: map the dword containing the accessed bit (bit 40). */
11031 GCPtr += 2 + 2;
11032 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11033 if (rcStrict != VINF_SUCCESS)
11034 return rcStrict;
11035 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11036 }
11037 else
11038 {
11039 /* The misaligned GDT/LDT case, map the whole thing. */
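        /* The accessed flag is descriptor bit 40 (byte 5, bit 0). Each case
           below re-bases the bit index onto a 4-byte aligned address within
           the mapping, e.g. if pu32 ends in binary ..01 then
           (uint8_t *)pu32 + 3 is aligned and bit 40 - 24 = 16 from there
           still lands on byte 5, bit 0. */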
11040 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11041 if (rcStrict != VINF_SUCCESS)
11042 return rcStrict;
11043 switch ((uintptr_t)pu32 & 3)
11044 {
11045 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11046 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11047 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11048 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11049 }
11050 }
11051
11052 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11053}
11054
11055/** @} */
11056
11057
11058/*
11059 * Include the C/C++ implementation of the instructions.
11060 */
11061#include "IEMAllCImpl.cpp.h"
11062
11063
11064
11065/** @name "Microcode" macros.
11066 *
11067 * The idea is that we should be able to use the same code both to interpret
11068 * instructions and to feed a recompiler. Thus this obfuscation.
11069 *
11070 * @{
11071 */
11072#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11073#define IEM_MC_END() }
11074#define IEM_MC_PAUSE() do {} while (0)
11075#define IEM_MC_CONTINUE() do {} while (0)
11076
11077/** Internal macro. */
11078#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11079 do \
11080 { \
11081 VBOXSTRICTRC rcStrict2 = a_Expr; \
11082 if (rcStrict2 != VINF_SUCCESS) \
11083 return rcStrict2; \
11084 } while (0)
11085
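/*
 * A minimal sketch of how these macros are meant to compose in an opcode
 * implementation (illustrative only, not an actual instruction from the
 * decoder files): copy AX into CX and advance RIP.
 *
 *   IEM_MC_BEGIN(0, 1);
 *   IEM_MC_LOCAL(uint16_t, u16Value);
 *   IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *   IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 *
 * When interpreting, these expand to the plain C statements defined below; a
 * recompiler could redefine them to emit code instead.
 */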
11086
11087#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11088#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11089#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11090#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11091#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11092#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11093#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11094#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11095#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11096 do { \
11097 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11098 return iemRaiseDeviceNotAvailable(pVCpu); \
11099 } while (0)
11100#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11101 do { \
11102 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11103 return iemRaiseDeviceNotAvailable(pVCpu); \
11104 } while (0)
11105#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11106 do { \
11107 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11108 return iemRaiseMathFault(pVCpu); \
11109 } while (0)
11110#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11111 do { \
11112 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11113 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11114 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11115 return iemRaiseUndefinedOpcode(pVCpu); \
11116 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11117 return iemRaiseDeviceNotAvailable(pVCpu); \
11118 } while (0)
11119#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11120 do { \
11121 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11122 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11123 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11124 return iemRaiseUndefinedOpcode(pVCpu); \
11125 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11126 return iemRaiseDeviceNotAvailable(pVCpu); \
11127 } while (0)
11128#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11129 do { \
11130 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11131 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11132 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11133 return iemRaiseUndefinedOpcode(pVCpu); \
11134 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11135 return iemRaiseDeviceNotAvailable(pVCpu); \
11136 } while (0)
11137#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11138 do { \
11139 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11140 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11141 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11142 return iemRaiseUndefinedOpcode(pVCpu); \
11143 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11144 return iemRaiseDeviceNotAvailable(pVCpu); \
11145 } while (0)
11146#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11147 do { \
11148 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11149 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11150 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11151 return iemRaiseUndefinedOpcode(pVCpu); \
11152 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11153 return iemRaiseDeviceNotAvailable(pVCpu); \
11154 } while (0)
11155#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11156 do { \
11157 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11158 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11159 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11160 return iemRaiseUndefinedOpcode(pVCpu); \
11161 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11162 return iemRaiseDeviceNotAvailable(pVCpu); \
11163 } while (0)
11164#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11165 do { \
11166 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11167 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11168 return iemRaiseUndefinedOpcode(pVCpu); \
11169 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11170 return iemRaiseDeviceNotAvailable(pVCpu); \
11171 } while (0)
11172#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11173 do { \
11174 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11175 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11176 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11177 return iemRaiseUndefinedOpcode(pVCpu); \
11178 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11179 return iemRaiseDeviceNotAvailable(pVCpu); \
11180 } while (0)
11181#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11182 do { \
11183 if (pVCpu->iem.s.uCpl != 0) \
11184 return iemRaiseGeneralProtectionFault0(pVCpu); \
11185 } while (0)
11186#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11187 do { \
11188 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11189 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11192 do { \
11193 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11194 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11195 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11196 return iemRaiseUndefinedOpcode(pVCpu); \
11197 } while (0)
11198#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11199 do { \
11200 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11201 return iemRaiseGeneralProtectionFault0(pVCpu); \
11202 } while (0)
11203
11204
11205#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11206#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11207#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11208#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11209#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11210#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11211#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11212 uint32_t a_Name; \
11213 uint32_t *a_pName = &a_Name
11214#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11215 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11216
11217#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11218#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11219
11220#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11225#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11226#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11227#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11228#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11233#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11234#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11235#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11236#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11237#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11238 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11239 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11240 } while (0)
11241#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11242 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11243 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11244 } while (0)
11245#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11246 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11247 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11248 } while (0)
11249/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11250#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11251 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11252 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11253 } while (0)
11254#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11255 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11256 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11257 } while (0)
11258/** @note Not for IOPL or IF testing or modification. */
11259#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11260#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11261#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11262#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11263
11264#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11265#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11266#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11267#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11268#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11269#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11270#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11271#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11272#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11273#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11274/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11275#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11276 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11277 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11278 } while (0)
11279#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11280 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11281 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11282 } while (0)
11283#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11284 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11285
11286
11287#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11288#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11289/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11290 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11291#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11292#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11293/** @note Not for IOPL or IF testing or modification. */
11294#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11295
11296#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11297#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11298#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11299 do { \
11300 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11301 *pu32Reg += (a_u32Value); \
11302 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11303 } while (0)
11304#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11305
11306#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11307#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11308#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11309 do { \
11310 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11311 *pu32Reg -= (a_u32Value); \
11312 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11313 } while (0)
11314#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11315#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11316
11317#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11318#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11319#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11320#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11321#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11322#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11323#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11324
11325#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11326#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11327#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11328#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11329
11330#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11331#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11332#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11333
11334#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11335#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11336#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11337
11338#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11339#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11340#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11341
11342#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11343#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11344#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11345
11346#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11347
11348#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11349
11350#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11351#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11352#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11353 do { \
11354 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11355 *pu32Reg &= (a_u32Value); \
11356 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11357 } while (0)
11358#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11359
11360#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11361#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11362#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11363 do { \
11364 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11365 *pu32Reg |= (a_u32Value); \
11366 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11367 } while (0)
11368#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11369
11370
11371/** @note Not for IOPL or IF modification. */
11372#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11373/** @note Not for IOPL or IF modification. */
11374#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11375/** @note Not for IOPL or IF modification. */
11376#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11377
11378#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11379
11380/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11381#define IEM_MC_FPU_TO_MMX_MODE() do { \
11382 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11383 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11384 } while (0)
11385
11386/** Switches the FPU state from MMX mode (FTW=0xffff). */
11387#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11388 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11389 } while (0)
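/* Note: XState.x87.FTW here is the abridged (FXSAVE-style) tag word with one
   bit per register, 1 = valid, so 0xff above corresponds to a full tag word
   of 0 (all valid) and 0 to a full tag word of 0xffff (all empty). */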
11390
11391#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11392 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11393#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11394 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11395#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11396 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11397 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11398 } while (0)
11399#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11400 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11401 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11402 } while (0)
11403#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11404 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11405#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11406 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11407#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11408 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11409
11410#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11411 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11412 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11413 } while (0)
11414#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11415 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11416#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11417 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11418#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11419 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11420#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11421 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11422 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11423 } while (0)
11424#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11425 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11426#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11427 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11428 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11429 } while (0)
11430#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11431 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11432#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11433 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11434 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11435 } while (0)
11436#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11437 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11438#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11439 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11440#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11441 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11442#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11443 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11444#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11445 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11446 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11447 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11448 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11449 } while (0)
11450
11451#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11452 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11453 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11454 } while (0)
11455#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11456 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11457 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11458 } while (0)
11459#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11460 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11461 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11462 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11463 } while (0)
11464#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11465 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11466 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11467 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11468 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11469 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11470 } while (0)
11471
11472#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11473#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11474 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11475 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11476 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11477 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11478 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11479 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11480 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11481 } while (0)
11482#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11483 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11484 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11485 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11486 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11487 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11488 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11489 } while (0)
11490#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11491 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11492 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11493 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11494 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11495 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11496 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11497 } while (0)
11498#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11499 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11500 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11501 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11502 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11503 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11504 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11505 } while (0)
11506
11507#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11508 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11509#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11510 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11511#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11512 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
11513#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11514 do { uintptr_t const iYRegTmp = (a_iYReg); \
11515 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11516 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11517 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11518 } while (0)
11519
11520#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11521 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11522 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11523 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11524 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11525 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11526 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11527 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11528 } while (0)
11529#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11530 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11531 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11532 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11533 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11534 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11535 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11536 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11537 } while (0)
11538#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11539 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11540 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11541 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11542 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11543 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11546 } while (0)
11547
11548#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11549 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11550 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11551 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11552 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11553 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11554 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11555 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11556 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11557 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11558 } while (0)
11559#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11560 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11561 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11562 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11563 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11564 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11565 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11566 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11567 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11568 } while (0)
11569#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11570 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11571 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11572 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11573 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11574 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11575 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11576 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11577 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11578 } while (0)
11579#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11580 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11581 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11582 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11583 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11584 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11585 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11586 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11587 } while (0)
11588
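/*
 * Note: each memory access macro below comes in two flavours. Without
 * IEM_WITH_SETJMP it calls a status-code returning fetch/store worker and
 * bails out via IEM_MC_RETURN_ON_FAILURE; with IEM_WITH_SETJMP it calls the
 * corresponding *Jmp worker, which is assumed to report failures by
 * longjmp'ing back to the instruction dispatcher instead of returning.
 */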
11589#ifndef IEM_WITH_SETJMP
11590# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11592# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11594# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11596#else
11597# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11598 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11599# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11600 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11601# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11602 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11603#endif
11604
11605#ifndef IEM_WITH_SETJMP
11606# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11608# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11609 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11610# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11612#else
11613# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11614 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11615# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11616 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11617# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11618 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11619#endif
11620
11621#ifndef IEM_WITH_SETJMP
11622# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11624# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11626# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11627 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11628#else
11629# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11630 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11631# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11632 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11633# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11634 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11635#endif
11636
11637#ifdef SOME_UNUSED_FUNCTION
11638# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11639 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11640#endif
11641
11642#ifndef IEM_WITH_SETJMP
11643# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11645# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11647# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11649# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11651#else
11652# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11653 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11654# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11655 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11656# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11657 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11658# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11659 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11660#endif
11661
11662#ifndef IEM_WITH_SETJMP
11663# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11665# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11667# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11669#else
11670# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11671 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11672# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11673 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11674# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11675 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11676#endif
11677
11678#ifndef IEM_WITH_SETJMP
11679# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11681# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11683#else
11684# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11685 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11686# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11687 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11688#endif
11689
11690#ifndef IEM_WITH_SETJMP
11691# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11693# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11695#else
11696# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11697 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11698# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11699 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11700#endif
11701
11702
11703
11704#ifndef IEM_WITH_SETJMP
11705# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11706 do { \
11707 uint8_t u8Tmp; \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11709 (a_u16Dst) = u8Tmp; \
11710 } while (0)
11711# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11712 do { \
11713 uint8_t u8Tmp; \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11715 (a_u32Dst) = u8Tmp; \
11716 } while (0)
11717# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11718 do { \
11719 uint8_t u8Tmp; \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11721 (a_u64Dst) = u8Tmp; \
11722 } while (0)
11723# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11724 do { \
11725 uint16_t u16Tmp; \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11727 (a_u32Dst) = u16Tmp; \
11728 } while (0)
11729# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11730 do { \
11731 uint16_t u16Tmp; \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11733 (a_u64Dst) = u16Tmp; \
11734 } while (0)
11735# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 do { \
11737 uint32_t u32Tmp; \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11739 (a_u64Dst) = u32Tmp; \
11740 } while (0)
11741#else /* IEM_WITH_SETJMP */
11742# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11743 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754#endif /* IEM_WITH_SETJMP */
11755
11756#ifndef IEM_WITH_SETJMP
11757# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11758 do { \
11759 uint8_t u8Tmp; \
11760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11761 (a_u16Dst) = (int8_t)u8Tmp; \
11762 } while (0)
11763# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11764 do { \
11765 uint8_t u8Tmp; \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11767 (a_u32Dst) = (int8_t)u8Tmp; \
11768 } while (0)
11769# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11770 do { \
11771 uint8_t u8Tmp; \
11772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11773 (a_u64Dst) = (int8_t)u8Tmp; \
11774 } while (0)
11775# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11776 do { \
11777 uint16_t u16Tmp; \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11779 (a_u32Dst) = (int16_t)u16Tmp; \
11780 } while (0)
11781# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11782 do { \
11783 uint16_t u16Tmp; \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11785 (a_u64Dst) = (int16_t)u16Tmp; \
11786 } while (0)
11787# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11788 do { \
11789 uint32_t u32Tmp; \
11790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11791 (a_u64Dst) = (int32_t)u32Tmp; \
11792 } while (0)
11793#else /* IEM_WITH_SETJMP */
11794# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11795 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11796# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11797 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11798# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806#endif /* IEM_WITH_SETJMP */
11807
11808#ifndef IEM_WITH_SETJMP
11809# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11811# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11813# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11815# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11817#else
11818# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11819 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11820# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11821 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11822# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11823 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11824# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11825 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11826#endif
11827
11828#ifndef IEM_WITH_SETJMP
11829# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11830 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11831# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11833# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11835# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11836 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11837#else
11838# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11839 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11840# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11841 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11842# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11843 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11844# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11845 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11846#endif
11847
11848#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11849#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11850#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11851#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11852#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11853#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11854#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11855 do { \
11856 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11857 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11858 } while (0)
11859
11860#ifndef IEM_WITH_SETJMP
11861# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11863# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11865#else
11866# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11867 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11868# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11869 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11870#endif
11871
11872#ifndef IEM_WITH_SETJMP
11873# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11875# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11876 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11877#else
11878# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11879 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11880# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11881 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11882#endif
11883
11884
11885#define IEM_MC_PUSH_U16(a_u16Value) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11887#define IEM_MC_PUSH_U32(a_u32Value) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11889#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11891#define IEM_MC_PUSH_U64(a_u64Value) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11893
11894#define IEM_MC_POP_U16(a_pu16Value) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11896#define IEM_MC_POP_U32(a_pu32Value) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11898#define IEM_MC_POP_U64(a_pu64Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11900
11901/** Maps guest memory for direct or bounce buffered access.
11902 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11903 * @remarks May return.
11904 */
11905#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11907
11908/** Maps guest memory for direct or bounce buffered access.
11909 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11910 * @remarks May return.
11911 */
11912#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11913 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11914
11915/** Commits the memory and unmaps the guest memory.
11916 * @remarks May return.
11917 */
11918#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
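
/*
 * Illustrative sketch: a typical read-modify-write user of IEM_MC_MEM_MAP and
 * IEM_MC_MEM_COMMIT_AND_UNMAP, loosely modelled on the memory form of the
 * byte-sized binary operators in the instruction tables.  Worker names such as
 * iemAImpl_add_u8 and the exact statement ordering should be checked against the
 * real decoder bodies; this only shows the overall pattern.
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
 *      IEM_MC_ARG(uint8_t,    u8Src,            1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u8, pu8Dst, u8Src, pEFlags);
 *
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */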
11920
11921/** Commits the memory and unmaps the guest memory unless the FPU status word
11922 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11923 * would prevent the store from taking place.
11924 *
11925 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11926 * store, while \#P will not.
11927 *
11928 * @remarks May in theory return - for now.
11929 */
11930#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11931 do { \
11932 if ( !(a_u16FSW & X86_FSW_ES) \
11933 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11934 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11936 } while (0)
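
/*
 * Illustrative sketch: how an FST-style store combines IEM_MC_MEM_MAP,
 * IEM_MC_CALL_FPU_AIMPL_3 and IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE, roughly
 * following the fst m32real pattern.  The worker name iemAImpl_fst_r80_to_r32 is
 * quoted from memory and the real body differs in details; treat this as a sketch
 * of the pattern only.
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_LOCAL(RTGCPTR,            GCPtrEffDst);
 *      IEM_MC_LOCAL(uint16_t,           u16Fsw);
 *      IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw,   u16Fsw, 0);
 *      IEM_MC_ARG(PRTFLOAT32U,          pr32Dst,           1);
 *      IEM_MC_ARG(PCRTFLOAT80U,         pr80Value,         2);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *
 *      IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
 *          IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
 *          IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      IEM_MC_ELSE()
 *          IEM_MC_IF_FCW_IM()
 *              IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
 *              IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
 *          IEM_MC_ENDIF();
 *          IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */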
11937
11938/** Calculate effective address from R/M. */
11939#ifndef IEM_WITH_SETJMP
11940# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11941 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11942#else
11943# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11944 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11945#endif
11946
11947#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11948#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11949#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11950#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11951#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11952#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11953#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11954
11955/**
11956 * Defers the rest of the instruction emulation to a C implementation routine
11957 * and returns, only taking the standard parameters.
11958 *
11959 * @param a_pfnCImpl The pointer to the C routine.
11960 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11961 */
11962#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11963
11964/**
11965 * Defers the rest of instruction emulation to a C implementation routine and
11966 * returns, taking one argument in addition to the standard ones.
11967 *
11968 * @param a_pfnCImpl The pointer to the C routine.
11969 * @param a0 The argument.
11970 */
11971#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11972
11973/**
11974 * Defers the rest of the instruction emulation to a C implementation routine
11975 * and returns, taking two arguments in addition to the standard ones.
11976 *
11977 * @param a_pfnCImpl The pointer to the C routine.
11978 * @param a0 The first extra argument.
11979 * @param a1 The second extra argument.
11980 */
11981#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11982
11983/**
11984 * Defers the rest of the instruction emulation to a C implementation routine
11985 * and returns, taking three arguments in addition to the standard ones.
11986 *
11987 * @param a_pfnCImpl The pointer to the C routine.
11988 * @param a0 The first extra argument.
11989 * @param a1 The second extra argument.
11990 * @param a2 The third extra argument.
11991 */
11992#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11993
11994/**
11995 * Defers the rest of the instruction emulation to a C implementation routine
11996 * and returns, taking four arguments in addition to the standard ones.
11997 *
11998 * @param a_pfnCImpl The pointer to the C routine.
11999 * @param a0 The first extra argument.
12000 * @param a1 The second extra argument.
12001 * @param a2 The third extra argument.
12002 * @param a3 The fourth extra argument.
12003 */
12004#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12005
12006/**
12007 * Defers the rest of the instruction emulation to a C implementation routine
12008 * and returns, taking five arguments in addition to the standard ones.
12009 *
12010 * @param a_pfnCImpl The pointer to the C routine.
12011 * @param a0 The first extra argument.
12012 * @param a1 The second extra argument.
12013 * @param a2 The third extra argument.
12014 * @param a3 The fourth extra argument.
12015 * @param a4 The fifth extra argument.
12016 */
12017#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
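
/*
 * Illustrative sketch: a decoder function hands the heavy lifting to a C
 * implementation routine like this.  iemCImpl_SomeWorker is a placeholder name;
 * since the IEM_MC_CALL_CIMPL_* statement returns, it is the last action inside
 * the IEM_MC_BEGIN/IEM_MC_END block.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint8_t, iEffSeg,     0);
 *      IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEffSrc);
 *      IEM_MC_END();
 */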
12018
12019/**
12020 * Defers the entire instruction emulation to a C implementation routine and
12021 * returns, only taking the standard parameters.
12022 *
12023 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12024 *
12025 * @param a_pfnCImpl The pointer to the C routine.
12026 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12027 */
12028#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12029
12030/**
12031 * Defers the entire instruction emulation to a C implementation routine and
12032 * returns, taking one argument in addition to the standard ones.
12033 *
12034 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12035 *
12036 * @param a_pfnCImpl The pointer to the C routine.
12037 * @param a0 The argument.
12038 */
12039#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12040
12041/**
12042 * Defers the entire instruction emulation to a C implementation routine and
12043 * returns, taking two arguments in addition to the standard ones.
12044 *
12045 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12046 *
12047 * @param a_pfnCImpl The pointer to the C routine.
12048 * @param a0 The first extra argument.
12049 * @param a1 The second extra argument.
12050 */
12051#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12052
12053/**
12054 * Defers the entire instruction emulation to a C implementation routine and
12055 * returns, taking three arguments in addition to the standard ones.
12056 *
12057 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12058 *
12059 * @param a_pfnCImpl The pointer to the C routine.
12060 * @param a0 The first extra argument.
12061 * @param a1 The second extra argument.
12062 * @param a2 The third extra argument.
12063 */
12064#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
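
/*
 * Illustrative sketch: instructions handled entirely in C use the
 * IEM_MC_DEFER_TO_CIMPL_* variants without any surrounding IEM_MC block; the HLT
 * decoder reads roughly like this (mnemonic/statistics macros omitted):
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */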
12065
12066/**
12067 * Calls a FPU assembly implementation taking one visible argument.
12068 *
12069 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12070 * @param a0 The first extra argument.
12071 */
12072#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12073 do { \
12074 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12075 } while (0)
12076
12077/**
12078 * Calls a FPU assembly implementation taking two visible arguments.
12079 *
12080 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12081 * @param a0 The first extra argument.
12082 * @param a1 The second extra argument.
12083 */
12084#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12085 do { \
12086 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12087 } while (0)
12088
12089/**
12090 * Calls a FPU assembly implementation taking three visible arguments.
12091 *
12092 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12093 * @param a0 The first extra argument.
12094 * @param a1 The second extra argument.
12095 * @param a2 The third extra argument.
12096 */
12097#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12098 do { \
12099 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12100 } while (0)
12101
12102#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12103 do { \
12104 (a_FpuData).FSW = (a_FSW); \
12105 (a_FpuData).r80Result = *(a_pr80Value); \
12106 } while (0)
12107
12108/** Pushes FPU result onto the stack. */
12109#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12110 iemFpuPushResult(pVCpu, &a_FpuData)
12111/** Pushes FPU result onto the stack and sets the FPUDP. */
12112#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12113 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12114
12115/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12116#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12117 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12118
12119/** Stores FPU result in a stack register. */
12120#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12121 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12122/** Stores FPU result in a stack register and pops the stack. */
12123#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12124 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12125/** Stores FPU result in a stack register and sets the FPUDP. */
12126#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12127 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12128/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12129 * stack. */
12130#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12131 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
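
/*
 * Illustrative sketch: the classic fadd/fmul style "ST(0) op ST(i)" body, combining
 * IEM_MC_CALL_FPU_AIMPL_3 and IEM_MC_STORE_FPU_RESULT with the IEM_MC_IF_* stack
 * register checks defined further down.  pfnAImpl and iStReg stand in for the worker
 * routine and the decoded register index; details of the real helper may differ.
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */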
12132
12133/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12134#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12135 iemFpuUpdateOpcodeAndIp(pVCpu)
12136/** Free a stack register (for FFREE and FFREEP). */
12137#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12138 iemFpuStackFree(pVCpu, a_iStReg)
12139/** Increment the FPU stack pointer. */
12140#define IEM_MC_FPU_STACK_INC_TOP() \
12141 iemFpuStackIncTop(pVCpu)
12142/** Decrement the FPU stack pointer. */
12143#define IEM_MC_FPU_STACK_DEC_TOP() \
12144 iemFpuStackDecTop(pVCpu)
12145
12146/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12147#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12148 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12149/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12150#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12151 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12152/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12153#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12154 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12155/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12156#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12157 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12158/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12159 * stack. */
12160#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12161 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12162/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12163#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12164 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12165
12166/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12167#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12168 iemFpuStackUnderflow(pVCpu, a_iStDst)
12169/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12170 * stack. */
12171#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12172 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12173/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12174 * FPUDS. */
12175#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12176 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12177/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12178 * FPUDS. Pops stack. */
12179#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12180 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12181/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12182 * stack twice. */
12183#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12184 iemFpuStackUnderflowThenPopPop(pVCpu)
12185/** Raises a FPU stack underflow exception for an instruction pushing a result
12186 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12187#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12188 iemFpuStackPushUnderflow(pVCpu)
12189/** Raises a FPU stack underflow exception for an instruction pushing a result
12190 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12191#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12192 iemFpuStackPushUnderflowTwo(pVCpu)
12193
12194/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12195 * FPUIP, FPUCS and FOP. */
12196#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12197 iemFpuStackPushOverflow(pVCpu)
12198/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12199 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12200#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12201 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12202/** Prepares for using the FPU state.
12203 * Ensures that we can use the host FPU in the current context (RC+R0).
12204 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12205#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12206/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12207#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12208/** Actualizes the guest FPU state so it can be accessed and modified. */
12209#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12210
12211/** Prepares for using the SSE state.
12212 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12213 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12214#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12215/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12216#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12217/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12218#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12219
12220/** Prepares for using the AVX state.
12221 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12222 * Ensures the guest AVX state in the CPUMCTX is up to date.
12223 * @note This will include the AVX512 state too when support for it is added
12224 * due to the zero-extending feature of VEX instructions. */
12225#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12226/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12227#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12228/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12229#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12230
12231/**
12232 * Calls a MMX assembly implementation taking two visible arguments.
12233 *
12234 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12235 * @param a0 The first extra argument.
12236 * @param a1 The second extra argument.
12237 */
12238#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12239 do { \
12240 IEM_MC_PREPARE_FPU_USAGE(); \
12241 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12242 } while (0)
12243
12244/**
12245 * Calls a MMX assembly implementation taking three visible arguments.
12246 *
12247 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12248 * @param a0 The first extra argument.
12249 * @param a1 The second extra argument.
12250 * @param a2 The third extra argument.
12251 */
12252#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12253 do { \
12254 IEM_MC_PREPARE_FPU_USAGE(); \
12255 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12256 } while (0)
12257
12258
12259/**
12260 * Calls a SSE assembly implementation taking two visible arguments.
12261 *
12262 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12263 * @param a0 The first extra argument.
12264 * @param a1 The second extra argument.
12265 */
12266#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12267 do { \
12268 IEM_MC_PREPARE_SSE_USAGE(); \
12269 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12270 } while (0)
12271
12272/**
12273 * Calls a SSE assembly implementation taking three visible arguments.
12274 *
12275 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12276 * @param a0 The first extra argument.
12277 * @param a1 The second extra argument.
12278 * @param a2 The third extra argument.
12279 */
12280#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12281 do { \
12282 IEM_MC_PREPARE_SSE_USAGE(); \
12283 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12284 } while (0)
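
/*
 * Illustrative sketch: the register,register form of a full-width SSE operation,
 * showing how IEM_MC_PREPARE_SSE_USAGE is done before taking XMM register references
 * and calling the worker.  pfnAImpl is a placeholder for the assembly worker.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */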
12285
12286
12287/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12288 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12289#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12290 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12291
12292/**
12293 * Calls a AVX assembly implementation taking two visible arguments.
12294 *
12295 * There is one implicit zero'th argument, a pointer to the extended state.
12296 *
12297 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12298 * @param a1 The first extra argument.
12299 * @param a2 The second extra argument.
12300 */
12301#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12302 do { \
12303 IEM_MC_PREPARE_AVX_USAGE(); \
12304 a_pfnAImpl(pXState, (a1), (a2)); \
12305 } while (0)
12306
12307/**
12308 * Calls a AVX assembly implementation taking three visible arguments.
12309 *
12310 * There is one implicit zero'th argument, a pointer to the extended state.
12311 *
12312 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12313 * @param a1 The first extra argument.
12314 * @param a2 The second extra argument.
12315 * @param a3 The third extra argument.
12316 */
12317#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12318 do { \
12319 IEM_MC_PREPARE_AVX_USAGE(); \
12320 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12321 } while (0)
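
/*
 * Illustrative sketch: an AVX worker call passes the extended state implicitly as
 * argument zero via IEM_MC_IMPLICIT_AVX_AIMPL_ARGS, with the visible arguments
 * starting at index 1.  The register index values and pfnAImpl are placeholders;
 * note that IEM_MC_CALL_AVX_AIMPL_2 performs IEM_MC_PREPARE_AVX_USAGE itself.
 *
 *      IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV();
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, iYRegDstValue, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, iYRegSrcValue, 2);
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */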
12322
12323/** @note Not for IOPL or IF testing. */
12324#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12325/** @note Not for IOPL or IF testing. */
12326#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12327/** @note Not for IOPL or IF testing. */
12328#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12329/** @note Not for IOPL or IF testing. */
12330#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12331/** @note Not for IOPL or IF testing. */
12332#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12333 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12334 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12337 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12338 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12339/** @note Not for IOPL or IF testing. */
12340#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12341 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12342 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12343 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12346 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12347 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12348 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12349#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12350#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12351#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12354 if ( pVCpu->cpum.GstCtx.cx != 0 \
12355 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12358 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12359 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12360/** @note Not for IOPL or IF testing. */
12361#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12362 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12363 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12366 if ( pVCpu->cpum.GstCtx.cx != 0 \
12367 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12370 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12371 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12374 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12375 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12376#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12377#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12378
12379#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12380 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12381#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12382 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12383#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12384 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12385#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12386 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12387#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12388 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12389#define IEM_MC_IF_FCW_IM() \
12390 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12391
12392#define IEM_MC_ELSE() } else {
12393#define IEM_MC_ENDIF() } do {} while (0)
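
/*
 * Illustrative sketch: the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros open and
 * close ordinary C blocks, so a CMOVcc-style register form reads roughly like this
 * (16-bit variant, ZF picked as the example condition):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Tmp);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *          IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Tmp);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */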
12394
12395/** @} */
12396
12397
12398/** @name Opcode Debug Helpers.
12399 * @{
12400 */
12401#ifdef VBOX_WITH_STATISTICS
12402# ifdef IN_RING3
12403# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12404# else
12405# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12406# endif
12407#else
12408# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12409#endif
12410
12411#ifdef DEBUG
12412# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12413 do { \
12414 IEMOP_INC_STATS(a_Stats); \
12415 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12416 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12417 } while (0)
12418
12419# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12420 do { \
12421 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12422 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12423 (void)RT_CONCAT(OP_,a_Upper); \
12424 (void)(a_fDisHints); \
12425 (void)(a_fIemHints); \
12426 } while (0)
12427
12428# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12429 do { \
12430 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12431 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12432 (void)RT_CONCAT(OP_,a_Upper); \
12433 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12434 (void)(a_fDisHints); \
12435 (void)(a_fIemHints); \
12436 } while (0)
12437
12438# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12439 do { \
12440 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12441 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12442 (void)RT_CONCAT(OP_,a_Upper); \
12443 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12444 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12445 (void)(a_fDisHints); \
12446 (void)(a_fIemHints); \
12447 } while (0)
12448
12449# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12450 do { \
12451 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12452 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12453 (void)RT_CONCAT(OP_,a_Upper); \
12454 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12455 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12456 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12457 (void)(a_fDisHints); \
12458 (void)(a_fIemHints); \
12459 } while (0)
12460
12461# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12462 do { \
12463 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12464 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12465 (void)RT_CONCAT(OP_,a_Upper); \
12466 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12467 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12468 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12469 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12470 (void)(a_fDisHints); \
12471 (void)(a_fIemHints); \
12472 } while (0)
12473
12474#else
12475# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12476
12477# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12478 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12479# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12481# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12482 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12483# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12485# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12486 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12487
12488#endif
12489
12490#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12491 IEMOP_MNEMONIC0EX(a_Lower, \
12492 #a_Lower, \
12493 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12494#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12496 #a_Lower " " #a_Op1, \
12497 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12498#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12500 #a_Lower " " #a_Op1 "," #a_Op2, \
12501 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12502#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12503 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12504 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12505 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12506#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12507 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12508 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12509 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
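
/*
 * Illustrative sketch: a decoder function typically starts with one of these, e.g.
 * for add Eb,Gb (the exact hint flags used upstream may differ):
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_LOCK_ALLOWED);
 *
 * which expands to IEMOP_MNEMONIC2EX(add_Eb_Gb, "add Eb,Gb", ...), i.e. the
 * statistics member becomes add_Eb_Gb and the Log4 decode line shows "add Eb,Gb".
 */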
12510
12511/** @} */
12512
12513
12514/** @name Opcode Helpers.
12515 * @{
12516 */
12517
12518#ifdef IN_RING3
12519# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12520 do { \
12521 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12522 else \
12523 { \
12524 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12525 return IEMOP_RAISE_INVALID_OPCODE(); \
12526 } \
12527 } while (0)
12528#else
12529# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12530 do { \
12531 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12532 else return IEMOP_RAISE_INVALID_OPCODE(); \
12533 } while (0)
12534#endif
12535
12536/** The instruction requires a 186 or later. */
12537#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12538# define IEMOP_HLP_MIN_186() do { } while (0)
12539#else
12540# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12541#endif
12542
12543/** The instruction requires a 286 or later. */
12544#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12545# define IEMOP_HLP_MIN_286() do { } while (0)
12546#else
12547# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12548#endif
12549
12550/** The instruction requires a 386 or later. */
12551#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12552# define IEMOP_HLP_MIN_386() do { } while (0)
12553#else
12554# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12555#endif
12556
12557/** The instruction requires a 386 or later if the given expression is true. */
12558#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12559# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12560#else
12561# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12562#endif
12563
12564/** The instruction requires a 486 or later. */
12565#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12566# define IEMOP_HLP_MIN_486() do { } while (0)
12567#else
12568# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12569#endif
12570
12571/** The instruction requires a Pentium (586) or later. */
12572#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12573# define IEMOP_HLP_MIN_586() do { } while (0)
12574#else
12575# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12576#endif
12577
12578/** The instruction requires a PentiumPro (686) or later. */
12579#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12580# define IEMOP_HLP_MIN_686() do { } while (0)
12581#else
12582# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12583#endif
12584
12585
12586/** The instruction raises an \#UD in real and V8086 mode. */
12587#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12588 do \
12589 { \
12590 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12591 else return IEMOP_RAISE_INVALID_OPCODE(); \
12592 } while (0)
12593
12594#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12595/** This instruction raises an \#UD in real and V8086 mode, or in long mode when
12596 * not using a 64-bit code segment (applicable to all VMX instructions except
12597 * VMCALL).
12598 */
12599#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12600 do \
12601 { \
12602 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12603 && ( !IEM_IS_LONG_MODE(pVCpu) \
12604 || IEM_IS_64BIT_CODE(pVCpu))) \
12605 { /* likely */ } \
12606 else \
12607 { \
12608 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12609 { \
12610 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12611 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12612 return IEMOP_RAISE_INVALID_OPCODE(); \
12613 } \
12614 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12615 { \
12616 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12617 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12618 return IEMOP_RAISE_INVALID_OPCODE(); \
12619 } \
12620 } \
12621 } while (0)
12622
12623/** The instruction can only be executed in VMX operation (VMX root mode and
12624 * non-root mode).
12625 *
12626 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12627 */
12628# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12629 do \
12630 { \
12631 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12632 else \
12633 { \
12634 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12635 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12636 return IEMOP_RAISE_INVALID_OPCODE(); \
12637 } \
12638 } while (0)
12639#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
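
/*
 * Illustrative sketch: the VMX instruction decoders chain these helpers before
 * deferring to the C implementation, along the lines of the following (diagnostic
 * enum prefix and worker name shown for VMXOFF; check the real decoder for details):
 *
 *      IEMOP_HLP_VMX_INSTR("vmxoff", kVmxVDiag_Vmxoff);
 *      IEMOP_HLP_IN_VMX_OPERATION("vmxoff", kVmxVDiag_Vmxoff);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmxoff);
 */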
12640
12641/** The instruction is not available in 64-bit mode; throw \#UD if we're in
12642 * 64-bit mode. */
12643#define IEMOP_HLP_NO_64BIT() \
12644 do \
12645 { \
12646 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12647 return IEMOP_RAISE_INVALID_OPCODE(); \
12648 } while (0)
12649
12650/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
12651 * 64-bit mode. */
12652#define IEMOP_HLP_ONLY_64BIT() \
12653 do \
12654 { \
12655 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12656 return IEMOP_RAISE_INVALID_OPCODE(); \
12657 } while (0)
12658
12659/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12660#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12661 do \
12662 { \
12663 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12664 iemRecalEffOpSize64Default(pVCpu); \
12665 } while (0)
12666
12667/** The instruction has 64-bit operand size if 64-bit mode. */
12668#define IEMOP_HLP_64BIT_OP_SIZE() \
12669 do \
12670 { \
12671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12672 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12673 } while (0)
12674
12675/** Only a REX prefix immediately preceding the first opcode byte takes
12676 * effect. This macro helps ensure this and logs bad guest code. */
12677#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12678 do \
12679 { \
12680 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12681 { \
12682 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12683 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12684 pVCpu->iem.s.uRexB = 0; \
12685 pVCpu->iem.s.uRexIndex = 0; \
12686 pVCpu->iem.s.uRexReg = 0; \
12687 iemRecalEffOpSize(pVCpu); \
12688 } \
12689 } while (0)
12690
12691/**
12692 * Done decoding.
12693 */
12694#define IEMOP_HLP_DONE_DECODING() \
12695 do \
12696 { \
12697 /*nothing for now, maybe later... */ \
12698 } while (0)
12699
12700/**
12701 * Done decoding, raise \#UD exception if lock prefix present.
12702 */
12703#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12704 do \
12705 { \
12706 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12707 { /* likely */ } \
12708 else \
12709 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12710 } while (0)
12711
12712
12713/**
12714 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12715 * repnz or size prefixes are present, or if in real or v8086 mode.
12716 */
12717#define IEMOP_HLP_DONE_VEX_DECODING() \
12718 do \
12719 { \
12720 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12721 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12722 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12723 { /* likely */ } \
12724 else \
12725 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12726 } while (0)
12727
12728/**
12729 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12730 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12731 */
12732#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12733 do \
12734 { \
12735 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12736 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12737 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12738 && pVCpu->iem.s.uVexLength == 0)) \
12739 { /* likely */ } \
12740 else \
12741 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12742 } while (0)
12743
12744
12745/**
12746 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12747 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12748 * register 0, or if in real or v8086 mode.
12749 */
12750#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12751 do \
12752 { \
12753 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12754 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12755 && !pVCpu->iem.s.uVex3rdReg \
12756 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12757 { /* likely */ } \
12758 else \
12759 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12760 } while (0)
12761
12762/**
12763 * Done decoding VEX, no V, L=0.
12764 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12765 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12766 */
12767#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12768 do \
12769 { \
12770 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12771 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12772 && pVCpu->iem.s.uVexLength == 0 \
12773 && pVCpu->iem.s.uVex3rdReg == 0 \
12774 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12775 { /* likely */ } \
12776 else \
12777 return IEMOP_RAISE_INVALID_OPCODE(); \
12778 } while (0)
12779
12780#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12781 do \
12782 { \
12783 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12784 { /* likely */ } \
12785 else \
12786 { \
12787 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12788 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12789 } \
12790 } while (0)
12791#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12792 do \
12793 { \
12794 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12795 { /* likely */ } \
12796 else \
12797 { \
12798 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12799 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12800 } \
12801 } while (0)
12802
12803/**
12804 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12805 * are present.
12806 */
12807#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12808 do \
12809 { \
12810 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12811 { /* likely */ } \
12812 else \
12813 return IEMOP_RAISE_INVALID_OPCODE(); \
12814 } while (0)
12815
12816/**
12817 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12818 * prefixes are present.
12819 */
12820#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12821 do \
12822 { \
12823 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12824 { /* likely */ } \
12825 else \
12826 return IEMOP_RAISE_INVALID_OPCODE(); \
12827 } while (0)
12828
12829
12830/**
12831 * Calculates the effective address of a ModR/M memory operand.
12832 *
12833 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12834 *
12835 * @return Strict VBox status code.
12836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12837 * @param bRm The ModRM byte.
12838 * @param cbImm The size of any immediate following the
12839 * effective address opcode bytes. Important for
12840 * RIP relative addressing.
12841 * @param pGCPtrEff Where to return the effective address.
12842 */
12843IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12844{
12845 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12846# define SET_SS_DEF() \
12847 do \
12848 { \
12849 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12850 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12851 } while (0)
12852
12853 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12854 {
12855/** @todo Check the effective address size crap! */
12856 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12857 {
12858 uint16_t u16EffAddr;
12859
12860 /* Handle the disp16 form with no registers first. */
12861 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12862 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12863 else
12864 {
12865                /* Get the displacement. */
12866 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12867 {
12868 case 0: u16EffAddr = 0; break;
12869 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12870 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12871 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12872 }
12873
12874 /* Add the base and index registers to the disp. */
12875 switch (bRm & X86_MODRM_RM_MASK)
12876 {
12877 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12878 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12879 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12880 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12881 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12882 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12883 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12884 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12885 }
12886 }
12887
12888 *pGCPtrEff = u16EffAddr;
12889 }
12890 else
12891 {
12892 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12893 uint32_t u32EffAddr;
12894
12895 /* Handle the disp32 form with no registers first. */
12896 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12897 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12898 else
12899 {
12900 /* Get the register (or SIB) value. */
12901 switch ((bRm & X86_MODRM_RM_MASK))
12902 {
12903 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12904 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12905 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12906 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12907 case 4: /* SIB */
12908 {
12909 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12910
12911 /* Get the index and scale it. */
12912 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12913 {
12914 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12915 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12916 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12917 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12918 case 4: u32EffAddr = 0; /*none */ break;
12919 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12920 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12921 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12922 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12923 }
12924 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12925
12926 /* add base */
12927 switch (bSib & X86_SIB_BASE_MASK)
12928 {
12929 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12930 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12931 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12932 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12933 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12934 case 5:
12935 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12936 {
12937 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12938 SET_SS_DEF();
12939 }
12940 else
12941 {
12942 uint32_t u32Disp;
12943 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12944 u32EffAddr += u32Disp;
12945 }
12946 break;
12947 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12948 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12949 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12950 }
12951 break;
12952 }
12953 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12954 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12955 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12956 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12957 }
12958
12959 /* Get and add the displacement. */
12960 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12961 {
12962 case 0:
12963 break;
12964 case 1:
12965 {
12966 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12967 u32EffAddr += i8Disp;
12968 break;
12969 }
12970 case 2:
12971 {
12972 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12973 u32EffAddr += u32Disp;
12974 break;
12975 }
12976 default:
12977 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12978 }
12979
12980 }
12981 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12982 *pGCPtrEff = u32EffAddr;
12983 else
12984 {
12985 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12986 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12987 }
12988 }
12989 }
12990 else
12991 {
12992 uint64_t u64EffAddr;
12993
12994 /* Handle the rip+disp32 form with no registers first. */
12995 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12996 {
12997 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12998 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12999 }
13000 else
13001 {
13002 /* Get the register (or SIB) value. */
13003 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13004 {
13005 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13006 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13007 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13008 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13009 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13010 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13011 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13012 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13013 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13014 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13015 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13016 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13017 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13018 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13019 /* SIB */
13020 case 4:
13021 case 12:
13022 {
13023 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13024
13025 /* Get the index and scale it. */
13026 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13027 {
13028 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13029 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13030 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13031 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13032 case 4: u64EffAddr = 0; /*none */ break;
13033 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13034 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13035 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13036 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13037 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13038 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13039 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13040 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13041 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13042 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13043 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13045 }
13046 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13047
13048 /* add base */
13049 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13050 {
13051 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13052 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13053 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13054 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13055 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13056 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13057 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13058 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13059 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13060 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13061 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13062 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13063 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13064 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13065 /* complicated encodings */
13066 case 5:
13067 case 13:
13068 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13069 {
13070 if (!pVCpu->iem.s.uRexB)
13071 {
13072 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13073 SET_SS_DEF();
13074 }
13075 else
13076 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13077 }
13078 else
13079 {
13080 uint32_t u32Disp;
13081 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13082 u64EffAddr += (int32_t)u32Disp;
13083 }
13084 break;
13085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13086 }
13087 break;
13088 }
13089 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13090 }
13091
13092 /* Get and add the displacement. */
13093 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13094 {
13095 case 0:
13096 break;
13097 case 1:
13098 {
13099 int8_t i8Disp;
13100 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13101 u64EffAddr += i8Disp;
13102 break;
13103 }
13104 case 2:
13105 {
13106 uint32_t u32Disp;
13107 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13108 u64EffAddr += (int32_t)u32Disp;
13109 break;
13110 }
13111 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13112 }
13113
13114 }
13115
13116 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13117 *pGCPtrEff = u64EffAddr;
13118 else
13119 {
13120 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13121 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13122 }
13123 }
13124
13125 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13126 return VINF_SUCCESS;
13127}
13128
13129
13130/**
13131 * Calculates the effective address of a ModR/M memory operand.
13132 *
13133 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13134 *
13135 * @return Strict VBox status code.
13136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13137 * @param bRm The ModRM byte.
13138 * @param cbImm The size of any immediate following the
13139 * effective address opcode bytes. Important for
13140 * RIP relative addressing.
13141 * @param pGCPtrEff Where to return the effective address.
13142 * @param offRsp RSP displacement.
13143 */
13144IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13145{
13146    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
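/* Make SS the default segment for BP/SP based addressing unless a segment prefix override is active. */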
13147# define SET_SS_DEF() \
13148 do \
13149 { \
13150 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13151 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13152 } while (0)
13153
13154 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13155 {
13156/** @todo Check the effective address size crap! */
13157 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13158 {
13159 uint16_t u16EffAddr;
13160
13161 /* Handle the disp16 form with no registers first. */
13162 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13163 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13164 else
13165 {
13166                    /* Get the displacement. */
13167 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13168 {
13169 case 0: u16EffAddr = 0; break;
13170 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13171 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13172 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13173 }
13174
13175 /* Add the base and index registers to the disp. */
13176 switch (bRm & X86_MODRM_RM_MASK)
13177 {
13178 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13179 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13180 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13181 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13182 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13183 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13184 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13185 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13186 }
13187 }
13188
13189 *pGCPtrEff = u16EffAddr;
13190 }
13191 else
13192 {
13193 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13194 uint32_t u32EffAddr;
13195
13196 /* Handle the disp32 form with no registers first. */
13197 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13198 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13199 else
13200 {
13201 /* Get the register (or SIB) value. */
13202 switch ((bRm & X86_MODRM_RM_MASK))
13203 {
13204 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13205 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13206 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13207 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13208 case 4: /* SIB */
13209 {
13210 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13211
13212 /* Get the index and scale it. */
13213 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13214 {
13215 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13216 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13217 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13218 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13219 case 4: u32EffAddr = 0; /*none */ break;
13220 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13221 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13222 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13223 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13224 }
13225 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13226
13227 /* add base */
13228 switch (bSib & X86_SIB_BASE_MASK)
13229 {
13230 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13231 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13232 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13233 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13234 case 4:
13235 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13236 SET_SS_DEF();
13237 break;
13238 case 5:
13239 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13240 {
13241 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13242 SET_SS_DEF();
13243 }
13244 else
13245 {
13246 uint32_t u32Disp;
13247 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13248 u32EffAddr += u32Disp;
13249 }
13250 break;
13251 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13252 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13253 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13254 }
13255 break;
13256 }
13257 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13258 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13259 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13260 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13261 }
13262
13263 /* Get and add the displacement. */
13264 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13265 {
13266 case 0:
13267 break;
13268 case 1:
13269 {
13270 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13271 u32EffAddr += i8Disp;
13272 break;
13273 }
13274 case 2:
13275 {
13276 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13277 u32EffAddr += u32Disp;
13278 break;
13279 }
13280 default:
13281 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13282 }
13283
13284 }
13285 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13286 *pGCPtrEff = u32EffAddr;
13287 else
13288 {
13289 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13290 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13291 }
13292 }
13293 }
13294 else
13295 {
13296 uint64_t u64EffAddr;
13297
13298 /* Handle the rip+disp32 form with no registers first. */
13299 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13300 {
13301 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
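            /* RIP relative addressing is relative to the next instruction, so the
               instruction length and any trailing immediate bytes must be added. */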
13302 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13303 }
13304 else
13305 {
13306 /* Get the register (or SIB) value. */
13307 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13308 {
13309 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13310 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13311 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13312 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13313 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13314 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13315 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13316 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13317 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13318 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13319 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13320 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13321 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13322 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13323 /* SIB */
13324 case 4:
13325 case 12:
13326 {
13327 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13328
13329 /* Get the index and scale it. */
13330 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13331 {
13332 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13333 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13334 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13335 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13336 case 4: u64EffAddr = 0; /*none */ break;
13337 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13338 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13339 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13340 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13341 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13342 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13343 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13344 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13345 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13346 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13347 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13349 }
13350 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13351
13352 /* add base */
13353 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13354 {
13355 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13356 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13357 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13358 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13359 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13360 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13361 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13362 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13363 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13364 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13365 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13366 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13367 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13368 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13369 /* complicated encodings */
13370 case 5:
13371 case 13:
13372 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13373 {
13374 if (!pVCpu->iem.s.uRexB)
13375 {
13376 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13377 SET_SS_DEF();
13378 }
13379 else
13380 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13381 }
13382 else
13383 {
13384 uint32_t u32Disp;
13385 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13386 u64EffAddr += (int32_t)u32Disp;
13387 }
13388 break;
13389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13390 }
13391 break;
13392 }
13393 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13394 }
13395
13396 /* Get and add the displacement. */
13397 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13398 {
13399 case 0:
13400 break;
13401 case 1:
13402 {
13403 int8_t i8Disp;
13404 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13405 u64EffAddr += i8Disp;
13406 break;
13407 }
13408 case 2:
13409 {
13410 uint32_t u32Disp;
13411 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13412 u64EffAddr += (int32_t)u32Disp;
13413 break;
13414 }
13415 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13416 }
13417
13418 }
13419
13420 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13421 *pGCPtrEff = u64EffAddr;
13422 else
13423 {
13424 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13425 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13426 }
13427 }
13428
13429    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13430 return VINF_SUCCESS;
13431}
13432
13433
13434#ifdef IEM_WITH_SETJMP
13435/**
13436 * Calculates the effective address of a ModR/M memory operand.
13437 *
13438 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13439 *
13440 * May longjmp on internal error.
13441 *
13442 * @return The effective address.
13443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13444 * @param bRm The ModRM byte.
13445 * @param cbImm The size of any immediate following the
13446 * effective address opcode bytes. Important for
13447 * RIP relative addressing.
13448 */
13449IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13450{
13451 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13452# define SET_SS_DEF() \
13453 do \
13454 { \
13455 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13456 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13457 } while (0)
13458
13459 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13460 {
13461/** @todo Check the effective address size crap! */
13462 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13463 {
13464 uint16_t u16EffAddr;
13465
13466 /* Handle the disp16 form with no registers first. */
13467 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13468 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13469 else
13470 {
13471                /* Get the displacement. */
13472 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13473 {
13474 case 0: u16EffAddr = 0; break;
13475 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13476 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13477 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13478 }
13479
13480 /* Add the base and index registers to the disp. */
13481 switch (bRm & X86_MODRM_RM_MASK)
13482 {
13483 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13484 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13485 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13486 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13487 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13488 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13489 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13490 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13491 }
13492 }
13493
13494 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13495 return u16EffAddr;
13496 }
13497
13498 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13499 uint32_t u32EffAddr;
13500
13501 /* Handle the disp32 form with no registers first. */
13502 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13503 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13504 else
13505 {
13506 /* Get the register (or SIB) value. */
13507 switch ((bRm & X86_MODRM_RM_MASK))
13508 {
13509 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13510 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13511 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13512 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13513 case 4: /* SIB */
13514 {
13515 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13516
13517 /* Get the index and scale it. */
13518 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13519 {
13520 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13521 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13522 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13523 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13524 case 4: u32EffAddr = 0; /*none */ break;
13525 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13526 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13527 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13528 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13529 }
13530 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13531
13532 /* add base */
13533 switch (bSib & X86_SIB_BASE_MASK)
13534 {
13535 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13536 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13537 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13538 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13539 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13540 case 5:
13541 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13542 {
13543 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13544 SET_SS_DEF();
13545 }
13546 else
13547 {
13548 uint32_t u32Disp;
13549 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13550 u32EffAddr += u32Disp;
13551 }
13552 break;
13553 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13554 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13555 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13556 }
13557 break;
13558 }
13559 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13560 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13561 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13562 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13563 }
13564
13565 /* Get and add the displacement. */
13566 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13567 {
13568 case 0:
13569 break;
13570 case 1:
13571 {
13572 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13573 u32EffAddr += i8Disp;
13574 break;
13575 }
13576 case 2:
13577 {
13578 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13579 u32EffAddr += u32Disp;
13580 break;
13581 }
13582 default:
13583 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13584 }
13585 }
13586
13587 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13588 {
13589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13590 return u32EffAddr;
13591 }
13592 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13593 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13594 return u32EffAddr & UINT16_MAX;
13595 }
13596
13597 uint64_t u64EffAddr;
13598
13599 /* Handle the rip+disp32 form with no registers first. */
13600 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13601 {
13602 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13603 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13604 }
13605 else
13606 {
13607 /* Get the register (or SIB) value. */
13608 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13609 {
13610 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13611 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13612 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13613 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13614 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13615 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13616 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13617 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13618 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13619 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13620 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13621 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13622 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13623 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13624 /* SIB */
13625 case 4:
13626 case 12:
13627 {
13628 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13629
13630 /* Get the index and scale it. */
13631 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13632 {
13633 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13634 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13635 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13636 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13637 case 4: u64EffAddr = 0; /*none */ break;
13638 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13639 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13640 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13641 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13642 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13643 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13644 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13645 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13646 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13647 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13648 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13650 }
13651 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13652
13653 /* add base */
13654 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13655 {
13656 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13657 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13658 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13659 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13660 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13661 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13662 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13663 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13664 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13665 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13666 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13667 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13668 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13669 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13670 /* complicated encodings */
13671 case 5:
13672 case 13:
13673 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13674 {
13675 if (!pVCpu->iem.s.uRexB)
13676 {
13677 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13678 SET_SS_DEF();
13679 }
13680 else
13681 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13682 }
13683 else
13684 {
13685 uint32_t u32Disp;
13686 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13687 u64EffAddr += (int32_t)u32Disp;
13688 }
13689 break;
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13691 }
13692 break;
13693 }
13694 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13695 }
13696
13697 /* Get and add the displacement. */
13698 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13699 {
13700 case 0:
13701 break;
13702 case 1:
13703 {
13704 int8_t i8Disp;
13705 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13706 u64EffAddr += i8Disp;
13707 break;
13708 }
13709 case 2:
13710 {
13711 uint32_t u32Disp;
13712 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13713 u64EffAddr += (int32_t)u32Disp;
13714 break;
13715 }
13716 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13717 }
13718
13719 }
13720
13721 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13722 {
13723 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13724 return u64EffAddr;
13725 }
13726 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13727 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13728 return u64EffAddr & UINT32_MAX;
13729}
13730#endif /* IEM_WITH_SETJMP */
13731
13732/** @} */
13733
13734
13735
13736/*
13737 * Include the instructions
13738 */
13739#include "IEMAllInstructions.cpp.h"
13740
13741
13742
13743#ifdef LOG_ENABLED
13744/**
13745 * Logs the current instruction.
13746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13747 * @param fSameCtx Set if we have the same context information as the VMM,
13748 * clear if we may have already executed an instruction in
13749 * our debug context. When clear, we assume IEMCPU holds
13750 * valid CPU mode info.
13751 *
13752 * The @a fSameCtx parameter is now misleading and obsolete.
13753 * @param pszFunction The IEM function doing the execution.
13754 */
13755IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13756{
13757# ifdef IN_RING3
13758 if (LogIs2Enabled())
13759 {
13760 char szInstr[256];
13761 uint32_t cbInstr = 0;
13762 if (fSameCtx)
13763 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13764 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13765 szInstr, sizeof(szInstr), &cbInstr);
13766 else
13767 {
13768 uint32_t fFlags = 0;
13769 switch (pVCpu->iem.s.enmCpuMode)
13770 {
13771 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13772 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13773 case IEMMODE_16BIT:
13774 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13775 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13776 else
13777 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13778 break;
13779 }
13780 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13781 szInstr, sizeof(szInstr), &cbInstr);
13782 }
13783
13784 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
13785 Log2(("**** %s\n"
13786 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13787 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13788 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13789 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13790 " %s\n"
13791 , pszFunction,
13792 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13793 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13794 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13795 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13796 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13797 szInstr));
13798
13799 if (LogIs3Enabled())
13800 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13801 }
13802 else
13803# endif
13804 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13805 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13806 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13807}
13808#endif /* LOG_ENABLED */
13809
13810
13811#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13812/**
13813 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13814 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13815 *
13816 * @returns Modified rcStrict.
13817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13818 * @param rcStrict The instruction execution status.
13819 */
13820static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13821{
13822 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13823 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13824 {
13825 /* VMX preemption timer takes priority over NMI-window exits. */
13826 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13827 {
13828 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13829 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13830 }
13831 /*
13832 * Check remaining intercepts.
13833 *
13834 * NMI-window and Interrupt-window VM-exits.
13835 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13836 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13837 *
13838 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13839 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13840 */
13841 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13842 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13843 && !TRPMHasTrap(pVCpu))
13844 {
13845 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13846 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13847 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13848 {
13849 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13850 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13851 }
13852 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13853 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13854 {
13855 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13856 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13857 }
13858 }
13859 }
13860 /* TPR-below threshold/APIC write has the highest priority. */
13861 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13862 {
13863 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13864 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13865 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13866 }
13867 /* MTF takes priority over VMX-preemption timer. */
13868 else
13869 {
13870 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13871 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13872 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13873 }
13874 return rcStrict;
13875}
13876#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13877
13878
13879/**
13880 * Makes status code adjustments (pass up from I/O and access handlers)
13881 * and maintains statistics.
13882 *
13883 * @returns Strict VBox status code to pass up.
13884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13885 * @param rcStrict The status from executing an instruction.
13886 */
13887DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13888{
13889 if (rcStrict != VINF_SUCCESS)
13890 {
13891 if (RT_SUCCESS(rcStrict))
13892 {
13893 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13894 || rcStrict == VINF_IOM_R3_IOPORT_READ
13895 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13896 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13897 || rcStrict == VINF_IOM_R3_MMIO_READ
13898 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13899 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13900 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13901 || rcStrict == VINF_CPUM_R3_MSR_READ
13902 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13903 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13904 || rcStrict == VINF_EM_RAW_TO_R3
13905 || rcStrict == VINF_EM_TRIPLE_FAULT
13906 || rcStrict == VINF_GIM_R3_HYPERCALL
13907 /* raw-mode / virt handlers only: */
13908 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13909 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13910 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13911 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13912 || rcStrict == VINF_SELM_SYNC_GDT
13913 || rcStrict == VINF_CSAM_PENDING_ACTION
13914 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13915 /* nested hw.virt codes: */
13916 || rcStrict == VINF_VMX_VMEXIT
13917 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13918 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13919 || rcStrict == VINF_SVM_VMEXIT
13920 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13921/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13922 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
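        /* Merge in any pass-up status recorded during execution: nested VM-exit
           statuses with nothing to pass up are folded to VINF_SUCCESS, otherwise
           whichever status has the higher priority wins. */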
13923#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13924 if ( rcStrict == VINF_VMX_VMEXIT
13925 && rcPassUp == VINF_SUCCESS)
13926 rcStrict = VINF_SUCCESS;
13927 else
13928#endif
13929#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13930 if ( rcStrict == VINF_SVM_VMEXIT
13931 && rcPassUp == VINF_SUCCESS)
13932 rcStrict = VINF_SUCCESS;
13933 else
13934#endif
13935 if (rcPassUp == VINF_SUCCESS)
13936 pVCpu->iem.s.cRetInfStatuses++;
13937 else if ( rcPassUp < VINF_EM_FIRST
13938 || rcPassUp > VINF_EM_LAST
13939 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13940 {
13941 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13942 pVCpu->iem.s.cRetPassUpStatus++;
13943 rcStrict = rcPassUp;
13944 }
13945 else
13946 {
13947 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13948 pVCpu->iem.s.cRetInfStatuses++;
13949 }
13950 }
13951 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13952 pVCpu->iem.s.cRetAspectNotImplemented++;
13953 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13954 pVCpu->iem.s.cRetInstrNotImplemented++;
13955 else
13956 pVCpu->iem.s.cRetErrStatuses++;
13957 }
13958 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13959 {
13960 pVCpu->iem.s.cRetPassUpStatus++;
13961 rcStrict = pVCpu->iem.s.rcPassUp;
13962 }
13963
13964 return rcStrict;
13965}
13966
13967
13968/**
13969 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13970 * IEMExecOneWithPrefetchedByPC.
13971 *
13972 * Similar code is found in IEMExecLots.
13973 *
13974 * @return Strict VBox status code.
13975 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13976 * @param fExecuteInhibit If set, execute the instruction following CLI,
13977 * POP SS and MOV SS,GR.
13978 * @param pszFunction The calling function name.
13979 */
13980DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13981{
13982 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13983 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13984 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13985 RT_NOREF_PV(pszFunction);
13986
13987#ifdef IEM_WITH_SETJMP
13988 VBOXSTRICTRC rcStrict;
13989 jmp_buf JmpBuf;
13990 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13991 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
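    /* A non-zero setjmp return value means one of the helpers longjmp'ed out with a strict status code. */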
13992 if ((rcStrict = setjmp(JmpBuf)) == 0)
13993 {
13994 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13995 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13996 }
13997 else
13998 pVCpu->iem.s.cLongJumps++;
13999 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14000#else
14001 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14002 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14003#endif
14004 if (rcStrict == VINF_SUCCESS)
14005 pVCpu->iem.s.cInstructions++;
14006 if (pVCpu->iem.s.cActiveMappings > 0)
14007 {
14008 Assert(rcStrict != VINF_SUCCESS);
14009 iemMemRollback(pVCpu);
14010 }
14011 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14012 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14013 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14014
14015//#ifdef DEBUG
14016// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14017//#endif
14018
14019#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14020 /*
14021 * Perform any VMX nested-guest instruction boundary actions.
14022 *
14023 * If any of these causes a VM-exit, we must skip executing the next
14024 * instruction (would run into stale page tables). A VM-exit makes sure
14025 * there is no interrupt-inhibition, so that should ensure we don't go
14026  * on to try to execute the next instruction.  Clearing fExecuteInhibit is
14027 * problematic because of the setjmp/longjmp clobbering above.
14028 */
14029 if ( rcStrict == VINF_SUCCESS
14030 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14031 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14032 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14033#endif
14034
14035 /* Execute the next instruction as well if a cli, pop ss or
14036 mov ss, Gr has just completed successfully. */
14037 if ( fExecuteInhibit
14038 && rcStrict == VINF_SUCCESS
14039 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14040 && EMIsInhibitInterruptsActive(pVCpu))
14041 {
14042 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14043 if (rcStrict == VINF_SUCCESS)
14044 {
14045#ifdef LOG_ENABLED
14046 iemLogCurInstr(pVCpu, false, pszFunction);
14047#endif
14048#ifdef IEM_WITH_SETJMP
14049 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14050 if ((rcStrict = setjmp(JmpBuf)) == 0)
14051 {
14052 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14053 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14054 }
14055 else
14056 pVCpu->iem.s.cLongJumps++;
14057 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14058#else
14059 IEM_OPCODE_GET_NEXT_U8(&b);
14060 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14061#endif
14062 if (rcStrict == VINF_SUCCESS)
14063 pVCpu->iem.s.cInstructions++;
14064 if (pVCpu->iem.s.cActiveMappings > 0)
14065 {
14066 Assert(rcStrict != VINF_SUCCESS);
14067 iemMemRollback(pVCpu);
14068 }
14069 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14070 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14071 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14072 }
14073 else if (pVCpu->iem.s.cActiveMappings > 0)
14074 iemMemRollback(pVCpu);
14075 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14076 }
14077
14078 /*
14079 * Return value fiddling, statistics and sanity assertions.
14080 */
14081 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14082
14083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14085 return rcStrict;
14086}
14087
14088
14089/**
14090 * Execute one instruction.
14091 *
14092 * @return Strict VBox status code.
14093 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14094 */
14095VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14096{
14097    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14098#ifdef LOG_ENABLED
14099 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14100#endif
14101
14102 /*
14103 * Do the decoding and emulation.
14104 */
14105 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14106 if (rcStrict == VINF_SUCCESS)
14107 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14108 else if (pVCpu->iem.s.cActiveMappings > 0)
14109 iemMemRollback(pVCpu);
14110
14111 if (rcStrict != VINF_SUCCESS)
14112 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14113 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14114 return rcStrict;
14115}
14116
14117
14118VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14119{
14120 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14121
14122 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14123 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14124 if (rcStrict == VINF_SUCCESS)
14125 {
14126 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14127 if (pcbWritten)
14128 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14129 }
14130 else if (pVCpu->iem.s.cActiveMappings > 0)
14131 iemMemRollback(pVCpu);
14132
14133 return rcStrict;
14134}
14135
14136
14137VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14138 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14139{
14140 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14141
14142 VBOXSTRICTRC rcStrict;
14143 if ( cbOpcodeBytes
14144 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14145 {
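        /* The caller already has the opcode bytes for the current RIP, so feed them
           straight to the decoder instead of prefetching from guest memory. */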
14146 iemInitDecoder(pVCpu, false, false);
14147#ifdef IEM_WITH_CODE_TLB
14148 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14149 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14150 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14151 pVCpu->iem.s.offCurInstrStart = 0;
14152 pVCpu->iem.s.offInstrNextByte = 0;
14153#else
14154 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14155 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14156#endif
14157 rcStrict = VINF_SUCCESS;
14158 }
14159 else
14160 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14161 if (rcStrict == VINF_SUCCESS)
14162 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14163 else if (pVCpu->iem.s.cActiveMappings > 0)
14164 iemMemRollback(pVCpu);
14165
14166 return rcStrict;
14167}
14168
14169
14170VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14171{
14172 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14173
14174 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14175 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14176 if (rcStrict == VINF_SUCCESS)
14177 {
14178 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14179 if (pcbWritten)
14180 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14181 }
14182 else if (pVCpu->iem.s.cActiveMappings > 0)
14183 iemMemRollback(pVCpu);
14184
14185 return rcStrict;
14186}
14187
14188
14189VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14190 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14191{
14192 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14193
14194 VBOXSTRICTRC rcStrict;
14195 if ( cbOpcodeBytes
14196 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14197 {
14198 iemInitDecoder(pVCpu, true, false);
14199#ifdef IEM_WITH_CODE_TLB
14200 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14201 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14202 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14203 pVCpu->iem.s.offCurInstrStart = 0;
14204 pVCpu->iem.s.offInstrNextByte = 0;
14205#else
14206 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14207 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14208#endif
14209 rcStrict = VINF_SUCCESS;
14210 }
14211 else
14212 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14213 if (rcStrict == VINF_SUCCESS)
14214 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14215 else if (pVCpu->iem.s.cActiveMappings > 0)
14216 iemMemRollback(pVCpu);
14217
14218 return rcStrict;
14219}
14220
14221
14222/**
14223 * For debugging DISGetParamSize, may come in handy.
14224 *
14225 * @returns Strict VBox status code.
14226 * @param pVCpu The cross context virtual CPU structure of the
14227 * calling EMT.
14228 * @param pCtxCore The context core structure.
14229 * @param OpcodeBytesPC The PC of the opcode bytes.
14230 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14231 * @param cbOpcodeBytes Number of prefetched bytes.
14232 * @param pcbWritten Where to return the number of bytes written.
14233 * Optional.
14234 */
14235VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14236 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14237 uint32_t *pcbWritten)
14238{
14239 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14240
14241 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14242 VBOXSTRICTRC rcStrict;
14243 if ( cbOpcodeBytes
14244 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14245 {
14246 iemInitDecoder(pVCpu, true, false);
14247#ifdef IEM_WITH_CODE_TLB
14248 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14249 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14250 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14251 pVCpu->iem.s.offCurInstrStart = 0;
14252 pVCpu->iem.s.offInstrNextByte = 0;
14253#else
14254 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14255 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14256#endif
14257 rcStrict = VINF_SUCCESS;
14258 }
14259 else
14260 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14261 if (rcStrict == VINF_SUCCESS)
14262 {
14263 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14264 if (pcbWritten)
14265 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14266 }
14267 else if (pVCpu->iem.s.cActiveMappings > 0)
14268 iemMemRollback(pVCpu);
14269
14270 return rcStrict;
14271}
14272
14273
14274/**
14275 * For handling split cacheline lock operations when the host has split-lock
14276 * detection enabled.
14277 *
14278 * This will cause the interpreter to disregard the lock prefix and implicit
14279 * locking (xchg).
14280 *
14281 * @returns Strict VBox status code.
14282 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14283 */
14284VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14285{
14286 /*
14287 * Do the decoding and emulation.
14288 */
14289 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14290 if (rcStrict == VINF_SUCCESS)
14291 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14292 else if (pVCpu->iem.s.cActiveMappings > 0)
14293 iemMemRollback(pVCpu);
14294
14295 if (rcStrict != VINF_SUCCESS)
14296 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14297 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14298 return rcStrict;
14299}
14300
14301
14302VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14303{
14304 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14305 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14306
14307 /*
14308 * See if there is an interrupt pending in TRPM, inject it if we can.
14309 */
14310 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14311#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14312 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14313 if (fIntrEnabled)
14314 {
14315 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14316 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14317 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14318 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14319 else
14320 {
14321 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14322 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14323 }
14324 }
14325#else
14326 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14327#endif
14328
14329 /** @todo What if we are injecting an exception and not an interrupt? Is that
14330 * possible here? For now we assert it is indeed only an interrupt. */
14331 if ( fIntrEnabled
14332 && TRPMHasTrap(pVCpu)
14333 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14334 {
14335 uint8_t u8TrapNo;
14336 TRPMEVENT enmType;
14337 uint32_t uErrCode;
14338 RTGCPTR uCr2;
14339 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14340 AssertRC(rc2);
14341 Assert(enmType == TRPM_HARDWARE_INT);
14342 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14343 TRPMResetTrap(pVCpu);
14344#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14345 /* Injecting an event may cause a VM-exit. */
14346 if ( rcStrict != VINF_SUCCESS
14347 && rcStrict != VINF_IEM_RAISED_XCPT)
14348 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14349#else
14350 NOREF(rcStrict);
14351#endif
14352 }
14353
14354 /*
14355 * Initial decoder init w/ prefetch, then setup setjmp.
14356 */
14357 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14358 if (rcStrict == VINF_SUCCESS)
14359 {
14360#ifdef IEM_WITH_SETJMP
14361 jmp_buf JmpBuf;
14362 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14363 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14364 pVCpu->iem.s.cActiveMappings = 0;
14365 if ((rcStrict = setjmp(JmpBuf)) == 0)
14366#endif
14367 {
14368 /*
14369              * The run loop.  We limit ourselves to the caller specified instruction count.
14370 */
14371 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14372 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14373 for (;;)
14374 {
14375 /*
14376 * Log the state.
14377 */
14378#ifdef LOG_ENABLED
14379 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14380#endif
14381
14382 /*
14383 * Do the decoding and emulation.
14384 */
14385 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14386 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14387 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14388 {
14389 Assert(pVCpu->iem.s.cActiveMappings == 0);
14390 pVCpu->iem.s.cInstructions++;
14391 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14392 {
14393 uint64_t fCpu = pVCpu->fLocalForcedActions
14394 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14395 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14396 | VMCPU_FF_TLB_FLUSH
14397 | VMCPU_FF_INHIBIT_INTERRUPTS
14398 | VMCPU_FF_BLOCK_NMIS
14399 | VMCPU_FF_UNHALT ));
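                    /* Only the force flags left after the masking above (or pending VM-wide
                       actions) make us leave the inner loop; APIC/PIC interrupts are ignored
                       here as long as IF is clear. */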
14400
14401 if (RT_LIKELY( ( !fCpu
14402 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14403 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14404 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14405 {
14406 if (cMaxInstructionsGccStupidity-- > 0)
14407 {
14408                        /* Poll timers every now and then according to the caller's specs. */
14409 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14410 || !TMTimerPollBool(pVM, pVCpu))
14411 {
14412 Assert(pVCpu->iem.s.cActiveMappings == 0);
14413 iemReInitDecoder(pVCpu);
14414 continue;
14415 }
14416 }
14417 }
14418 }
14419 Assert(pVCpu->iem.s.cActiveMappings == 0);
14420 }
14421 else if (pVCpu->iem.s.cActiveMappings > 0)
14422 iemMemRollback(pVCpu);
14423 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14424 break;
14425 }
14426 }
14427#ifdef IEM_WITH_SETJMP
14428 else
14429 {
14430 if (pVCpu->iem.s.cActiveMappings > 0)
14431 iemMemRollback(pVCpu);
14432# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14433 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14434# endif
14435 pVCpu->iem.s.cLongJumps++;
14436 }
14437 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14438#endif
14439
14440 /*
14441 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14442 */
14443 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14444 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14445 }
14446 else
14447 {
14448 if (pVCpu->iem.s.cActiveMappings > 0)
14449 iemMemRollback(pVCpu);
14450
14451#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14452 /*
14453 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14454 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14455 */
14456 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14457#endif
14458 }
14459
14460 /*
14461 * Maybe re-enter raw-mode and log.
14462 */
14463 if (rcStrict != VINF_SUCCESS)
14464 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14465 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14466 if (pcInstructions)
14467 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14468 return rcStrict;
14469}
14470
14471
14472/**
14473 * Interface used by EMExecuteExec, does exit statistics and limits.
14474 *
14475 * @returns Strict VBox status code.
14476 * @param pVCpu The cross context virtual CPU structure.
14477 * @param fWillExit To be defined.
14478 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14479 * @param cMaxInstructions Maximum number of instructions to execute.
14480 * @param cMaxInstructionsWithoutExits
14481 * The max number of instructions without exits.
14482 * @param pStats Where to return statistics.
14483 */
14484VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14485 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14486{
14487 NOREF(fWillExit); /** @todo define flexible exit crits */
14488
14489 /*
14490 * Initialize return stats.
14491 */
14492 pStats->cInstructions = 0;
14493 pStats->cExits = 0;
14494 pStats->cMaxExitDistance = 0;
14495 pStats->cReserved = 0;
14496
14497 /*
14498 * Initial decoder init w/ prefetch, then setup setjmp.
14499 */
14500 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14501 if (rcStrict == VINF_SUCCESS)
14502 {
14503#ifdef IEM_WITH_SETJMP
14504 jmp_buf JmpBuf;
14505 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14506 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14507 pVCpu->iem.s.cActiveMappings = 0;
14508 if ((rcStrict = setjmp(JmpBuf)) == 0)
14509#endif
14510 {
14511#ifdef IN_RING0
14512 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14513#endif
14514 uint32_t cInstructionSinceLastExit = 0;
14515
14516 /*
14517              * The run loop.  We limit ourselves to the caller specified instruction and exit limits.
14518 */
14519 PVM pVM = pVCpu->CTX_SUFF(pVM);
14520 for (;;)
14521 {
14522 /*
14523 * Log the state.
14524 */
14525#ifdef LOG_ENABLED
14526 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14527#endif
14528
14529 /*
14530 * Do the decoding and emulation.
14531 */
14532 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
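                /* Sample the potential-exit counter before executing so we can tell below
                   whether this instruction triggered an exit. */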
14533
14534 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14535 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14536
14537 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14538 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14539 {
14540 pStats->cExits += 1;
14541 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14542 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14543 cInstructionSinceLastExit = 0;
14544 }
14545
14546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14547 {
14548 Assert(pVCpu->iem.s.cActiveMappings == 0);
14549 pVCpu->iem.s.cInstructions++;
14550 pStats->cInstructions++;
14551 cInstructionSinceLastExit++;
14552 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14553 {
14554 uint64_t fCpu = pVCpu->fLocalForcedActions
14555 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14556 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14557 | VMCPU_FF_TLB_FLUSH
14558 | VMCPU_FF_INHIBIT_INTERRUPTS
14559 | VMCPU_FF_BLOCK_NMIS
14560 | VMCPU_FF_UNHALT ));
14561
14562 if (RT_LIKELY( ( ( !fCpu
14563 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14564 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14565 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14566 || pStats->cInstructions < cMinInstructions))
14567 {
14568 if (pStats->cInstructions < cMaxInstructions)
14569 {
14570 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14571 {
14572#ifdef IN_RING0
14573 if ( !fCheckPreemptionPending
14574 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14575#endif
14576 {
14577 Assert(pVCpu->iem.s.cActiveMappings == 0);
14578 iemReInitDecoder(pVCpu);
14579 continue;
14580 }
14581#ifdef IN_RING0
14582 rcStrict = VINF_EM_RAW_INTERRUPT;
14583 break;
14584#endif
14585 }
14586 }
14587 }
14588 Assert(!(fCpu & VMCPU_FF_IEM));
14589 }
14590 Assert(pVCpu->iem.s.cActiveMappings == 0);
14591 }
14592 else if (pVCpu->iem.s.cActiveMappings > 0)
14593 iemMemRollback(pVCpu);
14594 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14595 break;
14596 }
14597 }
14598#ifdef IEM_WITH_SETJMP
14599 else
14600 {
14601 if (pVCpu->iem.s.cActiveMappings > 0)
14602 iemMemRollback(pVCpu);
14603 pVCpu->iem.s.cLongJumps++;
14604 }
14605 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14606#endif
14607
14608 /*
14609 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14610 */
14611 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14612 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14613 }
14614 else
14615 {
14616 if (pVCpu->iem.s.cActiveMappings > 0)
14617 iemMemRollback(pVCpu);
14618
14619#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14620 /*
14621 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14622 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14623 */
14624 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14625#endif
14626 }
14627
14628 /*
14629 * Maybe re-enter raw-mode and log.
14630 */
14631 if (rcStrict != VINF_SUCCESS)
14632 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14633 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14634 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14635 return rcStrict;
14636}
14637
14638
14639/**
14640 * Injects a trap, fault, abort, software interrupt or external interrupt.
14641 *
14642 * The parameter list matches TRPMQueryTrapAll pretty closely.
14643 *
14644 * @returns Strict VBox status code.
14645 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14646 * @param u8TrapNo The trap number.
14647 * @param enmType What type is it (trap/fault/abort), software
14648 * interrupt or hardware interrupt.
14649 * @param uErrCode The error code if applicable.
14650 * @param uCr2 The CR2 value if applicable.
14651 * @param cbInstr The instruction length (only relevant for
14652 * software interrupts).
14653 */
14654VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14655 uint8_t cbInstr)
14656{
14657 iemInitDecoder(pVCpu, false, false);
14658#ifdef DBGFTRACE_ENABLED
14659 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14660 u8TrapNo, enmType, uErrCode, uCr2);
14661#endif
14662
14663 uint32_t fFlags;
14664 switch (enmType)
14665 {
14666 case TRPM_HARDWARE_INT:
14667 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14668 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14669 uErrCode = uCr2 = 0;
14670 break;
14671
14672 case TRPM_SOFTWARE_INT:
14673 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14674 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14675 uErrCode = uCr2 = 0;
14676 break;
14677
14678 case TRPM_TRAP:
14679 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14680 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14681 if (u8TrapNo == X86_XCPT_PF)
14682 fFlags |= IEM_XCPT_FLAGS_CR2;
14683 switch (u8TrapNo)
14684 {
14685 case X86_XCPT_DF:
14686 case X86_XCPT_TS:
14687 case X86_XCPT_NP:
14688 case X86_XCPT_SS:
14689 case X86_XCPT_PF:
14690 case X86_XCPT_AC:
14691 case X86_XCPT_GP:
14692 fFlags |= IEM_XCPT_FLAGS_ERR;
14693 break;
14694 }
14695 break;
14696
14697 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14698 }
14699
14700 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14701
14702 if (pVCpu->iem.s.cActiveMappings > 0)
14703 iemMemRollback(pVCpu);
14704
14705 return rcStrict;
14706}
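
/** @par Usage sketch
 * A minimal, hypothetical example of reflecting a guest page fault into IEM on
 * the calling EMT.  The uErrCode and uCr2 values are placeholders assumed to
 * come from the caller's own exit/trap information; cbInstr only matters for
 * software interrupts and is passed as zero here:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                           uErrCode, uCr2, 0);
 *     if (rcStrict == VINF_IEM_RAISED_XCPT)
 *         rcStrict = VINF_SUCCESS;
 * @endcode
 * Treating VINF_IEM_RAISED_XCPT as successful delivery mirrors how
 * IEMInjectTrpmEvent below decides whether to reset the TRPM event.
 */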
14707
14708
14709/**
14710 * Injects the active TRPM event.
14711 *
14712 * @returns Strict VBox status code.
14713 * @param pVCpu The cross context virtual CPU structure.
14714 */
14715VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14716{
14717#ifndef IEM_IMPLEMENTS_TASKSWITCH
14718 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14719#else
14720 uint8_t u8TrapNo;
14721 TRPMEVENT enmType;
14722 uint32_t uErrCode;
14723 RTGCUINTPTR uCr2;
14724 uint8_t cbInstr;
14725 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14726 if (RT_FAILURE(rc))
14727 return rc;
14728
14729 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14730 * ICEBP \#DB injection as a special case. */
14731 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14732#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14733 if (rcStrict == VINF_SVM_VMEXIT)
14734 rcStrict = VINF_SUCCESS;
14735#endif
14736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14737 if (rcStrict == VINF_VMX_VMEXIT)
14738 rcStrict = VINF_SUCCESS;
14739#endif
14740 /** @todo Are there any other codes that imply the event was successfully
14741 * delivered to the guest? See @bugref{6607}. */
14742 if ( rcStrict == VINF_SUCCESS
14743 || rcStrict == VINF_IEM_RAISED_XCPT)
14744 TRPMResetTrap(pVCpu);
14745
14746 return rcStrict;
14747#endif
14748}
14749
14750
14751VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14752{
14753 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14754 return VERR_NOT_IMPLEMENTED;
14755}
14756
14757
14758VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14759{
14760 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14761 return VERR_NOT_IMPLEMENTED;
14762}
14763
14764
14765#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14766/**
14767 * Executes an IRET instruction with the default operand size.
14768 *
14769 * This is for PATM.
14770 *
14771 * @returns VBox status code.
14772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14773 * @param pCtxCore The register frame.
14774 */
14775VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14776{
14777 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14778
14779 iemCtxCoreToCtx(pCtx, pCtxCore);
14780 iemInitDecoder(pVCpu);
14781 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14782 if (rcStrict == VINF_SUCCESS)
14783 iemCtxToCtxCore(pCtxCore, pCtx);
14784 else
14785 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14786 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14787 return rcStrict;
14788}
14789#endif
14790
14791
14792/**
14793 * Macro used by the IEMExec* method to check the given instruction length.
14794 *
14795 * Will return on failure!
14796 *
14797 * @param a_cbInstr The given instruction length.
14798 * @param a_cbMin The minimum length.
14799 */
14800#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14801 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14802 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
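
/* Worked example of the range check above (a sketch, not from the original file):
   with a_cbMin = 2 the unsigned subtraction folds both bounds into one compare,
   accepting lengths 2..15 and rejecting everything else:
        a_cbInstr = 1  ->  (unsigned)(1 - 2) = 0xffffffff > 13  -> assert + return VERR_IEM_INVALID_INSTR_LENGTH
        a_cbInstr = 2  ->   0 <= 13  -> OK
        a_cbInstr = 15 ->  13 <= 13  -> OK
        a_cbInstr = 16 ->  14 >  13  -> assert + return VERR_IEM_INVALID_INSTR_LENGTH
   15 is the architectural maximum x86 instruction length. */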
14803
14804
14805/**
14806 * Calls iemUninitExec and iemExecStatusCodeFiddling before returning to the
14807 * non-IEM caller.
14809 *
14810 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14812 * @param rcStrict The status code to fiddle.
14813 */
14814DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14815{
14816 iemUninitExec(pVCpu);
14817 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14818}
14819
14820
14821/**
14822 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14823 *
14824 * This API ASSUMES that the caller has already verified that the guest code is
14825 * allowed to access the I/O port. (The I/O port is in the DX register in the
14826 * guest state.)
14827 *
14828 * @returns Strict VBox status code.
14829 * @param pVCpu The cross context virtual CPU structure.
14830 * @param cbValue The size of the I/O port access (1, 2, or 4).
14831 * @param enmAddrMode The addressing mode.
14832 * @param fRepPrefix Indicates whether a repeat prefix is used
14833 * (doesn't matter which for this instruction).
14834 * @param cbInstr The instruction length in bytes.
14835 * @param iEffSeg The effective segment register.
14836 * @param fIoChecked Whether the access to the I/O port has been
14837 * checked or not. It's typically checked in the
14838 * HM scenario.
14839 */
14840VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14841 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14842{
14843 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14844 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14845
14846 /*
14847 * State init.
14848 */
14849 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14850
14851 /*
14852 * Switch orgy for getting to the right handler.
14853 */
14854 VBOXSTRICTRC rcStrict;
14855 if (fRepPrefix)
14856 {
14857 switch (enmAddrMode)
14858 {
14859 case IEMMODE_16BIT:
14860 switch (cbValue)
14861 {
14862 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14863 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14864 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14865 default:
14866 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14867 }
14868 break;
14869
14870 case IEMMODE_32BIT:
14871 switch (cbValue)
14872 {
14873 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14874 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14875 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14876 default:
14877 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14878 }
14879 break;
14880
14881 case IEMMODE_64BIT:
14882 switch (cbValue)
14883 {
14884 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14887 default:
14888 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14889 }
14890 break;
14891
14892 default:
14893 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14894 }
14895 }
14896 else
14897 {
14898 switch (enmAddrMode)
14899 {
14900 case IEMMODE_16BIT:
14901 switch (cbValue)
14902 {
14903 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14904 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14905 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14906 default:
14907 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14908 }
14909 break;
14910
14911 case IEMMODE_32BIT:
14912 switch (cbValue)
14913 {
14914 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14915 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14916 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14917 default:
14918 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14919 }
14920 break;
14921
14922 case IEMMODE_64BIT:
14923 switch (cbValue)
14924 {
14925 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14926 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14927 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14928 default:
14929 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14930 }
14931 break;
14932
14933 default:
14934 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14935 }
14936 }
14937
14938 if (pVCpu->iem.s.cActiveMappings)
14939 iemMemRollback(pVCpu);
14940
14941 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14942}
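
/** @par Usage sketch
 * A hypothetical HM-side caller handling a "rep outsb" exit with 32-bit
 * addressing and the default DS segment might invoke the interface like this
 * (cbInstr is assumed to come from the exit's instruction-length field):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                  1,              // cbValue: byte access
 *                                                  IEMMODE_32BIT,  // enmAddrMode
 *                                                  true,           // fRepPrefix
 *                                                  cbInstr,
 *                                                  X86_SREG_DS,    // iEffSeg
 *                                                  true);          // fIoChecked
 * @endcode
 */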
14943
14944
14945/**
14946 * Interface for HM and EM for executing string I/O IN (read) instructions.
14947 *
14948 * This API ASSUMES that the caller has already verified that the guest code is
14949 * allowed to access the I/O port. (The I/O port is in the DX register in the
14950 * guest state.)
14951 *
14952 * @returns Strict VBox status code.
14953 * @param pVCpu The cross context virtual CPU structure.
14954 * @param cbValue The size of the I/O port access (1, 2, or 4).
14955 * @param enmAddrMode The addressing mode.
14956 * @param fRepPrefix Indicates whether a repeat prefix is used
14957 * (doesn't matter which for this instruction).
14958 * @param cbInstr The instruction length in bytes.
14959 * @param fIoChecked Whether the access to the I/O port has been
14960 * checked or not. It's typically checked in the
14961 * HM scenario.
14962 */
14963VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14964 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14965{
14966 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14967
14968 /*
14969 * State init.
14970 */
14971 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14972
14973 /*
14974 * Switch orgy for getting to the right handler.
14975 */
14976 VBOXSTRICTRC rcStrict;
14977 if (fRepPrefix)
14978 {
14979 switch (enmAddrMode)
14980 {
14981 case IEMMODE_16BIT:
14982 switch (cbValue)
14983 {
14984 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14985 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14986 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14987 default:
14988 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14989 }
14990 break;
14991
14992 case IEMMODE_32BIT:
14993 switch (cbValue)
14994 {
14995 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14996 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14997 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14998 default:
14999 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15000 }
15001 break;
15002
15003 case IEMMODE_64BIT:
15004 switch (cbValue)
15005 {
15006 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15007 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15008 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15009 default:
15010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15011 }
15012 break;
15013
15014 default:
15015 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15016 }
15017 }
15018 else
15019 {
15020 switch (enmAddrMode)
15021 {
15022 case IEMMODE_16BIT:
15023 switch (cbValue)
15024 {
15025 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15026 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15027 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15028 default:
15029 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15030 }
15031 break;
15032
15033 case IEMMODE_32BIT:
15034 switch (cbValue)
15035 {
15036 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15037 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15038 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15039 default:
15040 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15041 }
15042 break;
15043
15044 case IEMMODE_64BIT:
15045 switch (cbValue)
15046 {
15047 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15048 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15049 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15050 default:
15051 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15052 }
15053 break;
15054
15055 default:
15056 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15057 }
15058 }
15059
15060 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15061 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15062}
15063
15064
15065/**
15066 * Interface for rawmode to execute an OUT instruction.
15067 *
15068 * @returns Strict VBox status code.
15069 * @param pVCpu The cross context virtual CPU structure.
15070 * @param cbInstr The instruction length in bytes.
15071 * @param u16Port The port to write to.
15072 * @param fImm Whether the port is specified using an immediate operand or
15073 * using the implicit DX register.
15074 * @param cbReg The register size.
15075 *
15076 * @remarks In ring-0 not all of the state needs to be synced in.
15077 */
15078VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15079{
15080 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15081 Assert(cbReg <= 4 && cbReg != 3);
15082
15083 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15084 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15085 Assert(!pVCpu->iem.s.cActiveMappings);
15086 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15087}
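
/** @par Usage sketch
 * Hypothetical example: emulating a two byte "out 80h, al" (immediate port
 * form, byte sized access), i.e. cbInstr=2, u16Port=0x80, fImm=true, cbReg=1:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2, 0x80, true, 1);
 * @endcode
 */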
15088
15089
15090/**
15091 * Interface for rawmode to execute an IN instruction.
15092 *
15093 * @returns Strict VBox status code.
15094 * @param pVCpu The cross context virtual CPU structure.
15095 * @param cbInstr The instruction length in bytes.
15096 * @param u16Port The port to read.
15097 * @param fImm Whether the port is specified using an immediate operand or
15098 * using the implicit DX.
15099 * @param cbReg The register size.
15100 */
15101VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15102{
15103 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15104 Assert(cbReg <= 4 && cbReg != 3);
15105
15106 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15107 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15108 Assert(!pVCpu->iem.s.cActiveMappings);
15109 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15110}
15111
15112
15113/**
15114 * Interface for HM and EM to write to a CRx register.
15115 *
15116 * @returns Strict VBox status code.
15117 * @param pVCpu The cross context virtual CPU structure.
15118 * @param cbInstr The instruction length in bytes.
15119 * @param iCrReg The control register number (destination).
15120 * @param iGReg The general purpose register number (source).
15121 *
15122 * @remarks In ring-0 not all of the state needs to be synced in.
15123 */
15124VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15125{
15126 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15127 Assert(iCrReg < 16);
15128 Assert(iGReg < 16);
15129
15130 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15131 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15132 Assert(!pVCpu->iem.s.cActiveMappings);
15133 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15134}
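
/** @par Usage sketch
 * Hypothetical example: a caller emulating a "mov cr3, rax" intercept would
 * pass the 3 byte instruction length, control register index 3 and GPR index
 * 0 (X86_GREG_xAX):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 * @endcode
 */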
15135
15136
15137/**
15138 * Interface for HM and EM to read from a CRx register.
15139 *
15140 * @returns Strict VBox status code.
15141 * @param pVCpu The cross context virtual CPU structure.
15142 * @param cbInstr The instruction length in bytes.
15143 * @param iGReg The general purpose register number (destination).
15144 * @param iCrReg The control register number (source).
15145 *
15146 * @remarks In ring-0 not all of the state needs to be synced in.
15147 */
15148VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15149{
15150 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15151 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15152 | CPUMCTX_EXTRN_APIC_TPR);
15153 Assert(iCrReg < 16);
15154 Assert(iGReg < 16);
15155
15156 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15157 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15158 Assert(!pVCpu->iem.s.cActiveMappings);
15159 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15160}
15161
15162
15163/**
15164 * Interface for HM and EM to clear the CR0[TS] bit.
15165 *
15166 * @returns Strict VBox status code.
15167 * @param pVCpu The cross context virtual CPU structure.
15168 * @param cbInstr The instruction length in bytes.
15169 *
15170 * @remarks In ring-0 not all of the state needs to be synced in.
15171 */
15172VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15173{
15174 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15175
15176 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15177 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15178 Assert(!pVCpu->iem.s.cActiveMappings);
15179 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15180}
15181
15182
15183/**
15184 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15185 *
15186 * @returns Strict VBox status code.
15187 * @param pVCpu The cross context virtual CPU structure.
15188 * @param cbInstr The instruction length in bytes.
15189 * @param uValue The value to load into CR0.
15190 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15191 * memory operand. Otherwise pass NIL_RTGCPTR.
15192 *
15193 * @remarks In ring-0 not all of the state needs to be synced in.
15194 */
15195VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15196{
15197 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15198
15199 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15200 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15201 Assert(!pVCpu->iem.s.cActiveMappings);
15202 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15203}
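
/** @par Usage sketch
 * Hypothetical example of the register form of LMSW (e.g. "lmsw ax", 3 bytes),
 * where there is no memory operand and NIL_RTGCPTR is passed for GCPtrEffDst;
 * uNewMsw is a placeholder for the value the guest is loading:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3, uNewMsw, NIL_RTGCPTR);
 * @endcode
 */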
15204
15205
15206/**
15207 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15208 *
15209 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15210 *
15211 * @returns Strict VBox status code.
15212 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15213 * @param cbInstr The instruction length in bytes.
15214 * @remarks In ring-0 not all of the state needs to be synced in.
15215 * @thread EMT(pVCpu)
15216 */
15217VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15218{
15219 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15220
15221 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15222 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15223 Assert(!pVCpu->iem.s.cActiveMappings);
15224 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15225}
15226
15227
15228/**
15229 * Interface for HM and EM to emulate the WBINVD instruction.
15230 *
15231 * @returns Strict VBox status code.
15232 * @param pVCpu The cross context virtual CPU structure.
15233 * @param cbInstr The instruction length in bytes.
15234 *
15235 * @remarks In ring-0 not all of the state needs to be synced in.
15236 */
15237VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15238{
15239 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15240
15241 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15242 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15243 Assert(!pVCpu->iem.s.cActiveMappings);
15244 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15245}
15246
15247
15248/**
15249 * Interface for HM and EM to emulate the INVD instruction.
15250 *
15251 * @returns Strict VBox status code.
15252 * @param pVCpu The cross context virtual CPU structure.
15253 * @param cbInstr The instruction length in bytes.
15254 *
15255 * @remarks In ring-0 not all of the state needs to be synced in.
15256 */
15257VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15258{
15259 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15260
15261 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15262 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15263 Assert(!pVCpu->iem.s.cActiveMappings);
15264 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15265}
15266
15267
15268/**
15269 * Interface for HM and EM to emulate the INVLPG instruction.
15270 *
15271 * @returns Strict VBox status code.
15272 * @retval VINF_PGM_SYNC_CR3
15273 *
15274 * @param pVCpu The cross context virtual CPU structure.
15275 * @param cbInstr The instruction length in bytes.
15276 * @param GCPtrPage The effective address of the page to invalidate.
15277 *
15278 * @remarks In ring-0 not all of the state needs to be synced in.
15279 */
15280VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15281{
15282 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15283
15284 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15285 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15286 Assert(!pVCpu->iem.s.cActiveMappings);
15287 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15288}
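
/** @par Usage sketch
 * Hypothetical example of emulating an INVLPG intercept; GCPtrPage is assumed
 * to be the decoded effective address and the 3 byte length matches e.g.
 * "invlpg [rax]".  VINF_PGM_SYNC_CR3 is an informational status the caller
 * typically propagates rather than treats as an error:
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, 3, GCPtrPage);
 *     if (rcStrict == VINF_PGM_SYNC_CR3)
 *     {
 *         // the caller is expected to let PGM resync CR3 before resuming the guest
 *     }
 * @endcode
 */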
15289
15290
15291/**
15292 * Interface for HM and EM to emulate the INVPCID instruction.
15293 *
15294 * @returns Strict VBox status code.
15295 * @retval VINF_PGM_SYNC_CR3
15296 *
15297 * @param pVCpu The cross context virtual CPU structure.
15298 * @param cbInstr The instruction length in bytes.
15299 * @param iEffSeg The effective segment register.
15300 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15301 * @param uType The invalidation type.
15302 *
15303 * @remarks In ring-0 not all of the state needs to be synced in.
15304 */
15305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15306 uint64_t uType)
15307{
15308 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15309
15310 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15311 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15312 Assert(!pVCpu->iem.s.cActiveMappings);
15313 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15314}
15315
15316
15317/**
15318 * Interface for HM and EM to emulate the CPUID instruction.
15319 *
15320 * @returns Strict VBox status code.
15321 *
15322 * @param pVCpu The cross context virtual CPU structure.
15323 * @param cbInstr The instruction length in bytes.
15324 *
15325 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15326 */
15327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15328{
15329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15330 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15331
15332 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15333 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15334 Assert(!pVCpu->iem.s.cActiveMappings);
15335 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15336}
15337
15338
15339/**
15340 * Interface for HM and EM to emulate the RDPMC instruction.
15341 *
15342 * @returns Strict VBox status code.
15343 *
15344 * @param pVCpu The cross context virtual CPU structure.
15345 * @param cbInstr The instruction length in bytes.
15346 *
15347 * @remarks Not all of the state needs to be synced in.
15348 */
15349VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15350{
15351 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15352 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15353
15354 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15355 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15356 Assert(!pVCpu->iem.s.cActiveMappings);
15357 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15358}
15359
15360
15361/**
15362 * Interface for HM and EM to emulate the RDTSC instruction.
15363 *
15364 * @returns Strict VBox status code.
15365 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15366 *
15367 * @param pVCpu The cross context virtual CPU structure.
15368 * @param cbInstr The instruction length in bytes.
15369 *
15370 * @remarks Not all of the state needs to be synced in.
15371 */
15372VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15373{
15374 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15375 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15376
15377 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15378 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15379 Assert(!pVCpu->iem.s.cActiveMappings);
15380 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15381}
15382
15383
15384/**
15385 * Interface for HM and EM to emulate the RDTSCP instruction.
15386 *
15387 * @returns Strict VBox status code.
15388 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15389 *
15390 * @param pVCpu The cross context virtual CPU structure.
15391 * @param cbInstr The instruction length in bytes.
15392 *
15393 * @remarks Not all of the state needs to be synced in. It is recommended
15394 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15395 */
15396VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15397{
15398 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15399 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15400
15401 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15402 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15403 Assert(!pVCpu->iem.s.cActiveMappings);
15404 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15405}
15406
15407
15408/**
15409 * Interface for HM and EM to emulate the RDMSR instruction.
15410 *
15411 * @returns Strict VBox status code.
15412 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15413 *
15414 * @param pVCpu The cross context virtual CPU structure.
15415 * @param cbInstr The instruction length in bytes.
15416 *
15417 * @remarks Not all of the state needs to be synced in. Requires RCX and
15418 * (currently) all MSRs.
15419 */
15420VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15421{
15422 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15423 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15424
15425 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15426 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15427 Assert(!pVCpu->iem.s.cActiveMappings);
15428 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15429}
15430
15431
15432/**
15433 * Interface for HM and EM to emulate the WRMSR instruction.
15434 *
15435 * @returns Strict VBox status code.
15436 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15437 *
15438 * @param pVCpu The cross context virtual CPU structure.
15439 * @param cbInstr The instruction length in bytes.
15440 *
15441 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15442 * and (currently) all MSRs.
15443 */
15444VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15445{
15446 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15447 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15448 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15449
15450 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15451 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15452 Assert(!pVCpu->iem.s.cActiveMappings);
15453 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15454}
15455
15456
15457/**
15458 * Interface for HM and EM to emulate the MONITOR instruction.
15459 *
15460 * @returns Strict VBox status code.
15461 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15462 *
15463 * @param pVCpu The cross context virtual CPU structure.
15464 * @param cbInstr The instruction length in bytes.
15465 *
15466 * @remarks Not all of the state needs to be synced in.
15467 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15468 * are used.
15469 */
15470VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15471{
15472 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15473 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15474
15475 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15476 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15477 Assert(!pVCpu->iem.s.cActiveMappings);
15478 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15479}
15480
15481
15482/**
15483 * Interface for HM and EM to emulate the MWAIT instruction.
15484 *
15485 * @returns Strict VBox status code.
15486 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15487 *
15488 * @param pVCpu The cross context virtual CPU structure.
15489 * @param cbInstr The instruction length in bytes.
15490 *
15491 * @remarks Not all of the state needs to be synced in.
15492 */
15493VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15494{
15495 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15496 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15497
15498 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15499 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15500 Assert(!pVCpu->iem.s.cActiveMappings);
15501 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15502}
15503
15504
15505/**
15506 * Interface for HM and EM to emulate the HLT instruction.
15507 *
15508 * @returns Strict VBox status code.
15509 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15510 *
15511 * @param pVCpu The cross context virtual CPU structure.
15512 * @param cbInstr The instruction length in bytes.
15513 *
15514 * @remarks Not all of the state needs to be synced in.
15515 */
15516VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15517{
15518 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15519
15520 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15521 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15522 Assert(!pVCpu->iem.s.cActiveMappings);
15523 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15524}
15525
15526
15527/**
15528 * Checks if IEM is in the process of delivering an event (interrupt or
15529 * exception).
15530 *
15531 * @returns true if we're in the process of raising an interrupt or exception,
15532 * false otherwise.
15533 * @param pVCpu The cross context virtual CPU structure.
15534 * @param puVector Where to store the vector associated with the
15535 * currently delivered event, optional.
15536 * @param pfFlags Where to store the event delivery flags (see
15537 * IEM_XCPT_FLAGS_XXX), optional.
15538 * @param puErr Where to store the error code associated with the
15539 * event, optional.
15540 * @param puCr2 Where to store the CR2 associated with the event,
15541 * optional.
15542 * @remarks The caller should check the flags to determine if the error code and
15543 * CR2 are valid for the event.
15544 */
15545VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15546{
15547 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15548 if (fRaisingXcpt)
15549 {
15550 if (puVector)
15551 *puVector = pVCpu->iem.s.uCurXcpt;
15552 if (pfFlags)
15553 *pfFlags = pVCpu->iem.s.fCurXcpt;
15554 if (puErr)
15555 *puErr = pVCpu->iem.s.uCurXcptErr;
15556 if (puCr2)
15557 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15558 }
15559 return fRaisingXcpt;
15560}
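
/** @par Usage sketch
 * A hypothetical caller checking whether IEM is in the middle of delivering a
 * page fault, so it can merge its own fault information correctly:
 * @code
 *     uint8_t  uVector = 0;
 *     uint32_t fFlags  = 0;
 *     uint32_t uErr    = 0;
 *     uint64_t uCr2    = 0;
 *     if (   IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2)
 *         && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
 *         && uVector == X86_XCPT_PF)
 *     {
 *         // a #PF is being delivered; uErr and uCr2 describe it when the
 *         // IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 flags are set
 *     }
 * @endcode
 */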
15561
15562#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15563
15564/**
15565 * Interface for HM and EM to emulate the CLGI instruction.
15566 *
15567 * @returns Strict VBox status code.
15568 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15569 * @param cbInstr The instruction length in bytes.
15570 * @thread EMT(pVCpu)
15571 */
15572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15573{
15574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15575
15576 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15577 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15578 Assert(!pVCpu->iem.s.cActiveMappings);
15579 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15580}
15581
15582
15583/**
15584 * Interface for HM and EM to emulate the STGI instruction.
15585 *
15586 * @returns Strict VBox status code.
15587 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15588 * @param cbInstr The instruction length in bytes.
15589 * @thread EMT(pVCpu)
15590 */
15591VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15592{
15593 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15594
15595 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15596 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15597 Assert(!pVCpu->iem.s.cActiveMappings);
15598 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15599}
15600
15601
15602/**
15603 * Interface for HM and EM to emulate the VMLOAD instruction.
15604 *
15605 * @returns Strict VBox status code.
15606 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15607 * @param cbInstr The instruction length in bytes.
15608 * @thread EMT(pVCpu)
15609 */
15610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15611{
15612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15613
15614 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15615 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15616 Assert(!pVCpu->iem.s.cActiveMappings);
15617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15618}
15619
15620
15621/**
15622 * Interface for HM and EM to emulate the VMSAVE instruction.
15623 *
15624 * @returns Strict VBox status code.
15625 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15626 * @param cbInstr The instruction length in bytes.
15627 * @thread EMT(pVCpu)
15628 */
15629VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15630{
15631 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15632
15633 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15634 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15635 Assert(!pVCpu->iem.s.cActiveMappings);
15636 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15637}
15638
15639
15640/**
15641 * Interface for HM and EM to emulate the INVLPGA instruction.
15642 *
15643 * @returns Strict VBox status code.
15644 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15645 * @param cbInstr The instruction length in bytes.
15646 * @thread EMT(pVCpu)
15647 */
15648VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15649{
15650 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15651
15652 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15654 Assert(!pVCpu->iem.s.cActiveMappings);
15655 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15656}
15657
15658
15659/**
15660 * Interface for HM and EM to emulate the VMRUN instruction.
15661 *
15662 * @returns Strict VBox status code.
15663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15664 * @param cbInstr The instruction length in bytes.
15665 * @thread EMT(pVCpu)
15666 */
15667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15668{
15669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15670 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15671
15672 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15673 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15674 Assert(!pVCpu->iem.s.cActiveMappings);
15675 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15676}
15677
15678
15679/**
15680 * Interface for HM and EM to emulate \#VMEXIT.
15681 *
15682 * @returns Strict VBox status code.
15683 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15684 * @param uExitCode The exit code.
15685 * @param uExitInfo1 The exit info. 1 field.
15686 * @param uExitInfo2 The exit info. 2 field.
15687 * @thread EMT(pVCpu)
15688 */
15689VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15690{
15691 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15692 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15693 if (pVCpu->iem.s.cActiveMappings)
15694 iemMemRollback(pVCpu);
15695 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15696}
15697
15698#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15699
15700#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15701
15702/**
15703 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15704 *
15705 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15706 * are performed. Bounds checks are strict builds only.
15707 * are performed. Bounds checks are done in strict builds only.
15708 * @param pVmcs Pointer to the virtual VMCS.
15709 * @param u64VmcsField The VMCS field.
15710 * @param pu64Dst Where to store the VMCS value.
15711 *
15712 * @remarks May be called with interrupts disabled.
15713 * @todo This should probably be moved to CPUM someday.
15714 */
15715VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15716{
15717 AssertPtr(pVmcs);
15718 AssertPtr(pu64Dst);
15719 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15720}
15721
15722
15723/**
15724 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15725 *
15726 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15727 * are performed. Bounds checks are done in strict builds only.
15728 *
15729 * @param pVmcs Pointer to the virtual VMCS.
15730 * @param u64VmcsField The VMCS field.
15731 * @param u64Val The value to write.
15732 *
15733 * @remarks May be called with interrupts disabled.
15734 * @todo This should probably be moved to CPUM someday.
15735 */
15736VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15737{
15738 AssertPtr(pVmcs);
15739 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15740}
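
/** @par Usage sketch
 * A hypothetical caller reading the guest RIP field out of a virtual VMCS it
 * already has a pointer to (pVmcs is assumed to have been obtained by the
 * caller from the nested-guest state it manages; the field encoding constant
 * comes from the VT-x headers):
 * @code
 *     uint64_t u64GuestRip = 0;
 *     IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);
 * @endcode
 */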
15741
15742
15743/**
15744 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15745 *
15746 * @returns Strict VBox status code.
15747 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15748 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15749 * the x2APIC device.
15750 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15751 *
15752 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15753 * @param idMsr The MSR being read.
15754 * @param pu64Value Pointer to the value being written or where to store the
15755 * value being read.
15756 * @param fWrite Whether this is an MSR write or read access.
15757 * @thread EMT(pVCpu)
15758 */
15759VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15760{
15761 Assert(pu64Value);
15762
15763 VBOXSTRICTRC rcStrict;
15764 if (fWrite)
15765 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15766 else
15767 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15768 Assert(!pVCpu->iem.s.cActiveMappings);
15769 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15770
15771}
15772
15773
15774/**
15775 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15776 *
15777 * @returns Strict VBox status code.
15778 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15779 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15780 *
15781 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15782 * @param pExitInfo Pointer to the VM-exit information.
15783 * @param pExitEventInfo Pointer to the VM-exit event information.
15784 * @thread EMT(pVCpu)
15785 */
15786VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15787{
15788 Assert(pExitInfo);
15789 Assert(pExitEventInfo);
15790 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15791 Assert(!pVCpu->iem.s.cActiveMappings);
15792 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15793
15794}
15795
15796
15797/**
15798 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15799 * VM-exit.
15800 *
15801 * @returns Strict VBox status code.
15802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15803 * @thread EMT(pVCpu)
15804 */
15805VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15806{
15807 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15808 Assert(!pVCpu->iem.s.cActiveMappings);
15809 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15810}
15811
15812
15813/**
15814 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15815 *
15816 * @returns Strict VBox status code.
15817 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15818 * @thread EMT(pVCpu)
15819 */
15820VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15821{
15822 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15823 Assert(!pVCpu->iem.s.cActiveMappings);
15824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15825}
15826
15827
15828/**
15829 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15830 *
15831 * @returns Strict VBox status code.
15832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15833 * @param uVector The external interrupt vector (pass 0 if the external
15834 * interrupt is still pending).
15835 * @param fIntPending Whether the external interrupt is pending or
15836 * acknowledged in the interrupt controller.
15837 * @thread EMT(pVCpu)
15838 */
15839VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15840{
15841 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15842 Assert(!pVCpu->iem.s.cActiveMappings);
15843 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15844}
15845
15846
15847/**
15848 * Interface for HM and EM to emulate VM-exit due to exceptions.
15849 *
15850 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15851 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15852 *
15853 * @returns Strict VBox status code.
15854 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15855 * @param pExitInfo Pointer to the VM-exit information.
15856 * @param pExitEventInfo Pointer to the VM-exit event information.
15857 * @thread EMT(pVCpu)
15858 */
15859VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15860{
15861 Assert(pExitInfo);
15862 Assert(pExitEventInfo);
15863 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15864 Assert(!pVCpu->iem.s.cActiveMappings);
15865 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15866}
15867
15868
15869/**
15870 * Interface for HM and EM to emulate VM-exit due to NMIs.
15871 *
15872 * @returns Strict VBox status code.
15873 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15874 * @thread EMT(pVCpu)
15875 */
15876VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15877{
15878 VMXVEXITINFO ExitInfo;
15879 RT_ZERO(ExitInfo);
15880 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15881
15882 VMXVEXITEVENTINFO ExitEventInfo;
15883 RT_ZERO(ExitEventInfo);
15884 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15885 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15886 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15887
15888 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15889 Assert(!pVCpu->iem.s.cActiveMappings);
15890 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15891}
15892
15893
15894/**
15895 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15896 *
15897 * @returns Strict VBox status code.
15898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15899 * @thread EMT(pVCpu)
15900 */
15901VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15902{
15903 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15904 Assert(!pVCpu->iem.s.cActiveMappings);
15905 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15906}
15907
15908
15909/**
15910 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15911 *
15912 * @returns Strict VBox status code.
15913 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15914 * @param uVector The SIPI vector.
15915 * @thread EMT(pVCpu)
15916 */
15917VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15918{
15919 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15920 Assert(!pVCpu->iem.s.cActiveMappings);
15921 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15922}
15923
15924
15925/**
15926 * Interface for HM and EM to emulate a VM-exit.
15927 *
15928 * If a specialized version of a VM-exit handler exists, that must be used instead.
15929 *
15930 * @returns Strict VBox status code.
15931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15932 * @param uExitReason The VM-exit reason.
15933 * @param u64ExitQual The Exit qualification.
15934 * @thread EMT(pVCpu)
15935 */
15936VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15937{
15938 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15939 Assert(!pVCpu->iem.s.cActiveMappings);
15940 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15941}
15942
15943
15944/**
15945 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15946 *
15947 * This is meant to be used for those instructions for which VMX provides
15948 * additional decoding information beyond just the instruction length.
15949 *
15950 * @returns Strict VBox status code.
15951 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15952 * @param pExitInfo Pointer to the VM-exit information.
15953 * @thread EMT(pVCpu)
15954 */
15955VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15956{
15957 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15958 Assert(!pVCpu->iem.s.cActiveMappings);
15959 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15960}
15961
15962
15963/**
15964 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15965 *
15966 * This is meant to be used for those instructions for which VMX provides only
15967 * the instruction length.
15968 *
15969 * @returns Strict VBox status code.
15970 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15971 * @param uExitReason The VM-exit reason.
15972 * @param cbInstr The instruction length in bytes.
15973 * @thread EMT(pVCpu)
15974 */
15975VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15976{
15977 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15978 Assert(!pVCpu->iem.s.cActiveMappings);
15979 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15980}
15981
15982
15983/**
15984 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15985 * Virtualized-EOI, TPR-below threshold).
15986 *
15987 * @returns Strict VBox status code.
15988 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15989 * @param pExitInfo Pointer to the VM-exit information.
15990 * @thread EMT(pVCpu)
15991 */
15992VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15993{
15994 Assert(pExitInfo);
15995 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
15996 Assert(!pVCpu->iem.s.cActiveMappings);
15997 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15998}
15999
16000
16001/**
16002 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16003 *
16004 * @returns Strict VBox status code.
16005 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16006 * @param pExitInfo Pointer to the VM-exit information.
16007 * @param pExitEventInfo Pointer to the VM-exit event information.
16008 * @thread EMT(pVCpu)
16009 */
16010VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16011{
16012 Assert(pExitInfo);
16013 Assert(pExitEventInfo);
16014 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16015 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16016 Assert(!pVCpu->iem.s.cActiveMappings);
16017 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16018}
16019
16020
16021/**
16022 * Interface for HM and EM to emulate the VMREAD instruction.
16023 *
16024 * @returns Strict VBox status code.
16025 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16026 * @param pExitInfo Pointer to the VM-exit information.
16027 * @thread EMT(pVCpu)
16028 */
16029VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16030{
16031 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16032 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16033 Assert(pExitInfo);
16034
16035 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16036
16037 VBOXSTRICTRC rcStrict;
16038 uint8_t const cbInstr = pExitInfo->cbInstr;
16039 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16040 uint64_t const u64FieldEnc = fIs64BitMode
16041 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16042 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16043 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16044 {
16045 if (fIs64BitMode)
16046 {
16047 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16048 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16049 }
16050 else
16051 {
16052 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16053 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16054 }
16055 }
16056 else
16057 {
16058 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16059 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16060 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16061 }
16062 Assert(!pVCpu->iem.s.cActiveMappings);
16063 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16064}
16065
16066
16067/**
16068 * Interface for HM and EM to emulate the VMWRITE instruction.
16069 *
16070 * @returns Strict VBox status code.
16071 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16072 * @param pExitInfo Pointer to the VM-exit information.
16073 * @thread EMT(pVCpu)
16074 */
16075VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16076{
16077 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16078 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16079 Assert(pExitInfo);
16080
16081 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16082
16083 uint64_t u64Val;
16084 uint8_t iEffSeg;
16085 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16086 {
16087 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16088 iEffSeg = UINT8_MAX;
16089 }
16090 else
16091 {
16092 u64Val = pExitInfo->GCPtrEffAddr;
16093 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16094 }
16095 uint8_t const cbInstr = pExitInfo->cbInstr;
16096 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16097 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16098 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16099 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16100 Assert(!pVCpu->iem.s.cActiveMappings);
16101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16102}


/**
 * Interface for HM and EM to emulate the VMPTRLD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMPTRST instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMCLEAR instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uInstrId    The instruction ID (VMXINSTRID_VMLAUNCH or
 *                      VMXINSTRID_VMRESUME).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
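
/*
 * Illustrative note (added): a single entry point serves both instructions; a
 * hypothetical call site would pass VMXINSTRID_VMLAUNCH for VMLAUNCH and
 * VMXINSTRID_VMRESUME for VMRESUME, e.g.
 *     IEMExecDecodedVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMRESUME);
 */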


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVVPID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    Assert(pExitInfo);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF3(pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
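
/*
 * Illustrative example of the merge rules above (added comment): VINF_SUCCESS or
 * VINF_EM_RAW_TO_R3 on the EM side simply yields the commit status; a VINF_SUCCESS
 * commit yields the EM status unchanged; and when both are EM scheduling codes the
 * numerically smaller one wins (by EM convention the lower value is the more urgent
 * scheduling request).  Everything else is punted to iemR3MergeStatusSlow.
 */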


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
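
/*
 * A minimal caller sketch (illustrative only): the ring-3 EM loop is expected to
 * invoke this when it finds VMCPU_FF_IEM pending; the exact call site below is an
 * assumption and not taken from this file.
 */
#if 0 /* illustrative sketch, not compiled */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif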

#endif /* IN_RING3 */