VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 80020

Last change on this file since 80020 was 80020, checked in by vboxsync, 6 years ago

VMM: Kicking out raw-mode (work in progress) - vm.h. bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 653.6 KB
1/* $Id: IEMAll.cpp 80020 2019-07-26 18:49:57Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
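/* Illustrative logging sketch: roughly how the levels documented above tend to
   be used in practice.  The format strings and variables here are made-up
   placeholders, shown for orientation only. */
#if 0 /* example sketch, not compiled */
LogFlow(("IEMExecOne: enter\n"));                          /* Flow:    enter/exit info. */
Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));           /* Level 1: exceptions and such. */
Log4(("decode - %04x:%08RX64 push rbp\n", uCs, uRip));     /* Level 4: mnemonics w/ EIP. */
Log8(("IEM WR %RGp LB %#zx\n", GCPhysMem, cbMem));         /* Level 8: memory writes. */
#endif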
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vm.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters, as well
134 * as tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters, as well
147 * as tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
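/* Illustrative sketch: typical checks on a descriptor fetched into an
   IEMSELDESC, using the legacy view (X86DESC generic fields from iprt/x86.h).
   The fault handling is elided. */
#if 0 /* example sketch, not compiled */
IEMSELDESC Desc;
VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
if (rcStrict == VINF_SUCCESS)
{
    if (!Desc.Legacy.Gen.u1Present)   /* P bit clear -> raise a not-present fault. */
    { /* ... */ }
    if (!Desc.Legacy.Gen.u1DescType)  /* 0 = system descriptor, 1 = code/data. */
    { /* ... */ }
}
#endif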
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
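/* Illustrative sketch of what the option changes for a caller.  The fetch
   helpers named here are placeholders, not the real IEM routines. */
#if 0 /* example sketch, not compiled */
/* Status-code style: every helper returns a VBOXSTRICTRC that must be checked. */
uint8_t bOpcode;
VBOXSTRICTRC rcStrict = iemExampleFetchU8(pVCpu, &bOpcode);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
/* setjmp style: the helper returns the value directly and longjmp()s on failure,
   so the per-call check and the output buffer disappear. */
uint8_t bOpcode2 = iemExampleFetchU8Jmp(pVCpu);
#endif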
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
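/* Illustrative sketch: typical use of the unreachable-default macro in an
   operand-size switch. */
#if 0 /* example sketch, not compiled */
switch (pVCpu->iem.s.enmEffOpSize)
{
    case IEMMODE_16BIT: /* ... */ break;
    case IEMMODE_32BIT: /* ... */ break;
    case IEMMODE_64BIT: /* ... */ break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif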
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
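/* Illustrative sketch: a decoder stub bailing out on an aspect it does not
   implement.  The opcode function name is a made-up placeholder. */
#if 0 /* example sketch, not compiled */
FNIEMOP_DEF(iemOp_example_stub)
{
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: register form not implemented\n"));
}
#endif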
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
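/* Illustrative sketch: how a fetched opcode byte is typically dispatched
   through these call macros (the fetch macro and the one-byte table are
   defined/declared further down in the decoder). */
#if 0 /* example sketch, not compiled */
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif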
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
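/* Illustrative sketch: a typical mode guard in an instruction handler,
   assuming the usual #UD raiser declared elsewhere in IEM. */
#if 0 /* example sketch, not compiled */
if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    return iemRaiseUndefinedOpcode(pVCpu);
#endif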
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
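/* Illustrative sketch: gating a decode path on a guest CPU feature flag
   (fSse2 is assumed to be one of the CPUMFEATURES members). */
#if 0 /* example sketch, not compiled */
if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2)
    return iemRaiseUndefinedOpcode(pVCpu);
#endif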
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored if not 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
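/* Illustrative sketch: the usual shape of a nested-VMX intercept check in an
   instruction handler.  The RDTSC control/exit-reason constants are assumed to
   come from the VMX headers and cbInstr to hold the instruction length. */
#if 0 /* example sketch, not compiled */
if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
    && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
    IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
#endif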
459
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept and updates
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
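/* Illustrative sketch: the usual shape of an SVM instruction-intercept check.
   The intercept/exit-code constants are assumed to come from the SVM headers;
   the exit-info values are simplified to zero. */
#if 0 /* example sketch, not compiled */
IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC,
                              0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif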
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
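/* Illustrative sketch: the group-1 table is indexed by the reg field of the
   ModR/M byte, which selects between ADD, OR, ADC, SBB, AND, SUB, XOR and CMP. */
#if 0 /* example sketch, not compiled */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
#endif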
736
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
965IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
969IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
970IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
971IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
972
973#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
974IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
975IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
978IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
981#endif
982
983#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
984IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
985IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
986#endif
987
988
989/**
990 * Sets the pass up status.
991 *
992 * @returns VINF_SUCCESS.
993 * @param pVCpu The cross context virtual CPU structure of the
994 * calling thread.
995 * @param rcPassUp The pass up status. Must be informational.
996 * VINF_SUCCESS is not allowed.
997 */
998IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
999{
1000 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1001
1002 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1003 if (rcOldPassUp == VINF_SUCCESS)
1004 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1005 /* If both are EM scheduling codes, use EM priority rules. */
1006 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1007 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1008 {
1009 if (rcPassUp < rcOldPassUp)
1010 {
1011 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1012 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1013 }
1014 else
1015 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1016 }
1017 /* Override EM scheduling with specific status code. */
1018 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1019 {
1020 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1021 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1022 }
1023 /* Don't override specific status code, first come first served. */
1024 else
1025 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1026 return VINF_SUCCESS;
1027}
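/* Illustrative sketch: how a caller typically folds an informational status
   into the pass-up status and carries on with VINF_SUCCESS. */
#if 0 /* example sketch, not compiled */
if (rcStrict != VINF_SUCCESS)
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); /* remembered; returns VINF_SUCCESS */
#endif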
1028
1029
1030/**
1031 * Calculates the CPU mode.
1032 *
1033 * This is mainly for updating IEMCPU::enmCpuMode.
1034 *
1035 * @returns CPU mode.
1036 * @param pVCpu The cross context virtual CPU structure of the
1037 * calling thread.
1038 */
1039DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1040{
1041 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1042 return IEMMODE_64BIT;
1043 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1044 return IEMMODE_32BIT;
1045 return IEMMODE_16BIT;
1046}
1047
1048
1049/**
1050 * Initializes the execution state.
1051 *
1052 * @param pVCpu The cross context virtual CPU structure of the
1053 * calling thread.
1054 * @param fBypassHandlers Whether to bypass access handlers.
1055 *
1056 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1057 * side-effects in strict builds.
1058 */
1059DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1060{
1061 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1062 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1063
1064#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1073#endif
1074
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1077#endif
1078 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1079 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1080#ifdef VBOX_STRICT
1081 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1082 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1083 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1084 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1085 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1086 pVCpu->iem.s.uRexReg = 127;
1087 pVCpu->iem.s.uRexB = 127;
1088 pVCpu->iem.s.offModRm = 127;
1089 pVCpu->iem.s.uRexIndex = 127;
1090 pVCpu->iem.s.iEffSeg = 127;
1091 pVCpu->iem.s.idxPrefix = 127;
1092 pVCpu->iem.s.uVex3rdReg = 127;
1093 pVCpu->iem.s.uVexLength = 127;
1094 pVCpu->iem.s.fEvexStuff = 127;
1095 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1096# ifdef IEM_WITH_CODE_TLB
1097 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1098 pVCpu->iem.s.pbInstrBuf = NULL;
1099 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1100 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1101 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1102 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1103# else
1104 pVCpu->iem.s.offOpcode = 127;
1105 pVCpu->iem.s.cbOpcode = 127;
1106# endif
1107#endif
1108
1109 pVCpu->iem.s.cActiveMappings = 0;
1110 pVCpu->iem.s.iNextMapping = 0;
1111 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1112 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1113#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1114 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1115 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1116 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1117 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1118 if (!pVCpu->iem.s.fInPatchCode)
1119 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1120#endif
1121#if 0
1122#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) && !defined(IN_RC)
1123 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1124 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1125 {
1126 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1127 Assert(pVmcs);
1128 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1129 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1130 {
1131 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1132 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1133 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1134 AssertRC(rc);
1135 }
1136 }
1137#endif
1138#endif
1139}
1140
1141#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1142/**
1143 * Performs a minimal reinitialization of the execution state.
1144 *
1145 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1146 * 'world-switch' types operations on the CPU. Currently only nested
1147 * hardware-virtualization uses it.
1148 *
1149 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1150 */
1151IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1152{
1153 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1154 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1155
1156 pVCpu->iem.s.uCpl = uCpl;
1157 pVCpu->iem.s.enmCpuMode = enmMode;
1158 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1159 pVCpu->iem.s.enmEffAddrMode = enmMode;
1160 if (enmMode != IEMMODE_64BIT)
1161 {
1162 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1163 pVCpu->iem.s.enmEffOpSize = enmMode;
1164 }
1165 else
1166 {
1167 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1168 pVCpu->iem.s.enmEffOpSize = enmMode;
1169 }
1170 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1171#ifndef IEM_WITH_CODE_TLB
1172 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1173 pVCpu->iem.s.offOpcode = 0;
1174 pVCpu->iem.s.cbOpcode = 0;
1175#endif
1176 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1177}
1178#endif
1179
1180/**
1181 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1182 *
1183 * @param pVCpu The cross context virtual CPU structure of the
1184 * calling thread.
1185 */
1186DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1187{
1188 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1189#ifdef VBOX_STRICT
1190# ifdef IEM_WITH_CODE_TLB
1191 NOREF(pVCpu);
1192# else
1193 pVCpu->iem.s.cbOpcode = 0;
1194# endif
1195#else
1196 NOREF(pVCpu);
1197#endif
1198}
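/* Illustrative sketch: iemInitExec() and iemUninitExec() are expected to
   bracket an execution attempt, per the remark on iemInitExec() above.  The
   middle step is a placeholder. */
#if 0 /* example sketch, not compiled */
iemInitExec(pVCpu, false /*fBypassHandlers*/);
VBOXSTRICTRC rcStrict = iemExampleDecodeAndRunOne(pVCpu); /* placeholder for the decode/run step */
iemUninitExec(pVCpu);
#endif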
1199
1200
1201/**
1202 * Initializes the decoder state.
1203 *
1204 * iemReInitDecoder is mostly a copy of this function.
1205 *
1206 * @param pVCpu The cross context virtual CPU structure of the
1207 * calling thread.
1208 * @param fBypassHandlers Whether to bypass access handlers.
1209 */
1210DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1211{
1212 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1213 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1214
1215#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1224#endif
1225
1226#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1227 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1228#endif
1229 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1230 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1231 pVCpu->iem.s.enmCpuMode = enmMode;
1232 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1233 pVCpu->iem.s.enmEffAddrMode = enmMode;
1234 if (enmMode != IEMMODE_64BIT)
1235 {
1236 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1237 pVCpu->iem.s.enmEffOpSize = enmMode;
1238 }
1239 else
1240 {
1241 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1242 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1243 }
1244 pVCpu->iem.s.fPrefixes = 0;
1245 pVCpu->iem.s.uRexReg = 0;
1246 pVCpu->iem.s.uRexB = 0;
1247 pVCpu->iem.s.uRexIndex = 0;
1248 pVCpu->iem.s.idxPrefix = 0;
1249 pVCpu->iem.s.uVex3rdReg = 0;
1250 pVCpu->iem.s.uVexLength = 0;
1251 pVCpu->iem.s.fEvexStuff = 0;
1252 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1253#ifdef IEM_WITH_CODE_TLB
1254 pVCpu->iem.s.pbInstrBuf = NULL;
1255 pVCpu->iem.s.offInstrNextByte = 0;
1256 pVCpu->iem.s.offCurInstrStart = 0;
1257# ifdef VBOX_STRICT
1258 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1259 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1260 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1261# endif
1262#else
1263 pVCpu->iem.s.offOpcode = 0;
1264 pVCpu->iem.s.cbOpcode = 0;
1265#endif
1266 pVCpu->iem.s.offModRm = 0;
1267 pVCpu->iem.s.cActiveMappings = 0;
1268 pVCpu->iem.s.iNextMapping = 0;
1269 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1270 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1271#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1272 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1273 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1274 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1275 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1276 if (!pVCpu->iem.s.fInPatchCode)
1277 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1278#endif
1279
1280#ifdef DBGFTRACE_ENABLED
1281 switch (enmMode)
1282 {
1283 case IEMMODE_64BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1285 break;
1286 case IEMMODE_32BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1288 break;
1289 case IEMMODE_16BIT:
1290 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1291 break;
1292 }
1293#endif
1294}
1295
1296
1297/**
1298 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1299 *
1300 * This is mostly a copy of iemInitDecoder.
1301 *
1302 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1303 */
1304DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1305{
1306 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1307
1308#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1315 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1316 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1317#endif
1318
1319 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1320 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1321 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1322 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1323 pVCpu->iem.s.enmEffAddrMode = enmMode;
1324 if (enmMode != IEMMODE_64BIT)
1325 {
1326 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1327 pVCpu->iem.s.enmEffOpSize = enmMode;
1328 }
1329 else
1330 {
1331 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1332 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1333 }
1334 pVCpu->iem.s.fPrefixes = 0;
1335 pVCpu->iem.s.uRexReg = 0;
1336 pVCpu->iem.s.uRexB = 0;
1337 pVCpu->iem.s.uRexIndex = 0;
1338 pVCpu->iem.s.idxPrefix = 0;
1339 pVCpu->iem.s.uVex3rdReg = 0;
1340 pVCpu->iem.s.uVexLength = 0;
1341 pVCpu->iem.s.fEvexStuff = 0;
1342 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1343#ifdef IEM_WITH_CODE_TLB
1344 if (pVCpu->iem.s.pbInstrBuf)
1345 {
1346 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1347 - pVCpu->iem.s.uInstrBufPc;
1348 if (off < pVCpu->iem.s.cbInstrBufTotal)
1349 {
1350 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1351 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1352 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1353 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1354 else
1355 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1356 }
1357 else
1358 {
1359 pVCpu->iem.s.pbInstrBuf = NULL;
1360 pVCpu->iem.s.offInstrNextByte = 0;
1361 pVCpu->iem.s.offCurInstrStart = 0;
1362 pVCpu->iem.s.cbInstrBuf = 0;
1363 pVCpu->iem.s.cbInstrBufTotal = 0;
1364 }
1365 }
1366 else
1367 {
1368 pVCpu->iem.s.offInstrNextByte = 0;
1369 pVCpu->iem.s.offCurInstrStart = 0;
1370 pVCpu->iem.s.cbInstrBuf = 0;
1371 pVCpu->iem.s.cbInstrBufTotal = 0;
1372 }
1373#else
1374 pVCpu->iem.s.cbOpcode = 0;
1375 pVCpu->iem.s.offOpcode = 0;
1376#endif
1377 pVCpu->iem.s.offModRm = 0;
1378 Assert(pVCpu->iem.s.cActiveMappings == 0);
1379 pVCpu->iem.s.iNextMapping = 0;
1380 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1381 Assert(pVCpu->iem.s.fBypassHandlers == false);
1382#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1383 if (!pVCpu->iem.s.fInPatchCode)
1384 { /* likely */ }
1385 else
1386 {
1387 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1388 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1389 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1390 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1391 if (!pVCpu->iem.s.fInPatchCode)
1392 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1393 }
1394#endif
1395
1396#ifdef DBGFTRACE_ENABLED
1397 switch (enmMode)
1398 {
1399 case IEMMODE_64BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1401 break;
1402 case IEMMODE_32BIT:
1403 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1404 break;
1405 case IEMMODE_16BIT:
1406 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1407 break;
1408 }
1409#endif
1410}
1411
1412
1413
1414/**
1415 * Prefetches opcodes the first time, i.e. when starting execution.
1416 *
1417 * @returns Strict VBox status code.
1418 * @param pVCpu The cross context virtual CPU structure of the
1419 * calling thread.
1420 * @param fBypassHandlers Whether to bypass access handlers.
1421 */
1422IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1423{
1424 iemInitDecoder(pVCpu, fBypassHandlers);
1425
1426#ifdef IEM_WITH_CODE_TLB
1427 /** @todo Do ITLB lookup here. */
1428
1429#else /* !IEM_WITH_CODE_TLB */
1430
1431 /*
1432 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1433 *
1434 * First translate CS:rIP to a physical address.
1435 */
1436 uint32_t cbToTryRead;
1437 RTGCPTR GCPtrPC;
1438 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1439 {
1440 cbToTryRead = PAGE_SIZE;
1441 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1442 if (IEM_IS_CANONICAL(GCPtrPC))
1443 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1444 else
1445 return iemRaiseGeneralProtectionFault0(pVCpu);
1446 }
1447 else
1448 {
1449 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1450 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1451 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1452 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1453 else
1454 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1455 if (cbToTryRead) { /* likely */ }
1456 else /* overflowed */
1457 {
1458 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1459 cbToTryRead = UINT32_MAX;
1460 }
1461 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1462 Assert(GCPtrPC <= UINT32_MAX);
1463 }
1464
1465# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1466 /* Allow interpretation of patch manager code blocks since they can for
1467 instance throw #PFs for perfectly good reasons. */
1468 if (pVCpu->iem.s.fInPatchCode)
1469 {
1470 size_t cbRead = 0;
1471 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1472 AssertRCReturn(rc, rc);
1473 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1474 return VINF_SUCCESS;
1475 }
1476# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1477
1478 RTGCPHYS GCPhys;
1479 uint64_t fFlags;
1480 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1481 if (RT_SUCCESS(rc)) { /* probable */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1485 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1486 }
1487 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1491 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1492 }
1493 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1494 else
1495 {
1496 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1497 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1498 }
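 /* PGMGstGetPage returns the page aligned guest physical address, so merge
    the page offset back in to get the exact physical address of RIP. */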
1499 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1500 /** @todo Check reserved bits and such stuff. PGM is better at doing
1501 * that, so do it when implementing the guest virtual address
1502 * TLB... */
1503
1504 /*
1505 * Read the bytes at this address.
1506 */
1507 PVM pVM = pVCpu->CTX_SUFF(pVM);
1508# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1509 size_t cbActual;
1510 if ( PATMIsEnabled(pVM)
1511 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1512 {
1513 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1514 Assert(cbActual > 0);
1515 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1516 }
1517 else
1518# endif
1519 {
1520 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1521 if (cbToTryRead > cbLeftOnPage)
1522 cbToTryRead = cbLeftOnPage;
1523 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1524 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1525
1526 if (!pVCpu->iem.s.fBypassHandlers)
1527 {
1528 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1529 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1530 { /* likely */ }
1531 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1532 {
1533 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1534 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1535 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1536 }
1537 else
1538 {
1539 Log((RT_SUCCESS(rcStrict)
1540 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1541 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1542 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1543 return rcStrict;
1544 }
1545 }
1546 else
1547 {
1548 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1549 if (RT_SUCCESS(rc))
1550 { /* likely */ }
1551 else
1552 {
1553 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1554 GCPtrPC, GCPhys, cbToTryRead, rc));
1555 return rc;
1556 }
1557 }
1558 pVCpu->iem.s.cbOpcode = cbToTryRead;
1559 }
1560#endif /* !IEM_WITH_CODE_TLB */
1561 return VINF_SUCCESS;
1562}
1563
1564
1565/**
1566 * Invalidates the IEM TLBs.
1567 *
1568 * This is called internally as well as by PGM when moving GC mappings.
1569 *
1571 * @param pVCpu The cross context virtual CPU structure of the calling
1572 * thread.
1573 * @param fVmm Set when PGM calls us with a remapping.
1574 */
1575VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1576{
1577#ifdef IEM_WITH_CODE_TLB
1578 pVCpu->iem.s.cbInstrBufTotal = 0;
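 /* Invalidation is done lazily by bumping the TLB revision; entries tagged
    with an older revision simply stop matching.  Only when the revision
    counter wraps around to zero do we sweep the array and clear the tags. */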
1579 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1580 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1581 { /* very likely */ }
1582 else
1583 {
1584 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1585 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1586 while (i-- > 0)
1587 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1588 }
1589#endif
1590
1591#ifdef IEM_WITH_DATA_TLB
1592 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1593 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1594 { /* very likely */ }
1595 else
1596 {
1597 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1598 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1599 while (i-- > 0)
1600 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1601 }
1602#endif
1603 NOREF(pVCpu); NOREF(fVmm);
1604}
1605
1606
1607/**
1608 * Invalidates a page in the TLBs.
1609 *
1610 * @param pVCpu The cross context virtual CPU structure of the calling
1611 * thread.
1612 * @param GCPtr The address of the page to invalidate.
1613 */
1614VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1615{
1616#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1617 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1618 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1619 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
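 /* Both TLBs are direct mapped: the entry index is the low 8 bits of the
    virtual page number, and the tag is the page number merged with the
    current revision (cf. IEMTlbInvalidateAll). */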
1620 uintptr_t idx = (uint8_t)GCPtr;
1621
1622# ifdef IEM_WITH_CODE_TLB
1623 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1626 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1627 pVCpu->iem.s.cbInstrBufTotal = 0;
1628 }
1629# endif
1630
1631# ifdef IEM_WITH_DATA_TLB
1632 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1633 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1634# endif
1635#else
1636 NOREF(pVCpu); NOREF(GCPtr);
1637#endif
1638}
1639
1640
1641/**
1642 * Invalidates the host physical aspects of the IEM TLBs.
1643 *
1644 * This is called internally as well as by PGM when moving GC mappings.
1645 *
1646 * @param pVCpu The cross context virtual CPU structure of the calling
1647 * thread.
1648 */
1649VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1650{
1651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1652 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1653
1654# ifdef IEM_WITH_CODE_TLB
1655 pVCpu->iem.s.cbInstrBufTotal = 0;
1656# endif
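 /* Same lazy scheme as the virtual side: bump the physical revision so the
    IEMTLBE_F_PHYS_REV portion of existing entries no longer matches; on
    wrap-around, drop the ring-3 mappings and physical access flags instead. */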
1657 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1658 if (uTlbPhysRev != 0)
1659 {
1660 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1661 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1662 }
1663 else
1664 {
1665 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1666 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1667
1668 unsigned i;
1669# ifdef IEM_WITH_CODE_TLB
1670 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1671 while (i-- > 0)
1672 {
1673 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1674 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1675 }
1676# endif
1677# ifdef IEM_WITH_DATA_TLB
1678 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1679 while (i-- > 0)
1680 {
1681 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1682 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1683 }
1684# endif
1685 }
1686#else
1687 NOREF(pVCpu);
1688#endif
1689}
1690
1691
1692/**
1693 * Invalidates the host physical aspects of the IEM TLBs.
1694 *
1695 * This is called internally as well as by PGM when moving GC mappings.
1696 *
1697 * @param pVM The cross context VM structure.
1698 *
1699 * @remarks Caller holds the PGM lock.
1700 */
1701VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1702{
1703 RT_NOREF_PV(pVM);
1704}
1705
1706#ifdef IEM_WITH_CODE_TLB
1707
1708/**
1709 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1710 * failure and jumping.
1711 *
1712 * We end up here for a number of reasons:
1713 * - pbInstrBuf isn't yet initialized.
1714 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1715 * - Advancing beyond the CS segment limit.
1716 * - Fetching from non-mappable page (e.g. MMIO).
1717 *
1718 * @param pVCpu The cross context virtual CPU structure of the
1719 * calling thread.
1720 * @param pvDst Where to return the bytes.
1721 * @param cbDst Number of bytes to read.
1722 *
1723 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1724 */
1725IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1726{
1727#ifdef IN_RING3
1728 for (;;)
1729 {
1730 Assert(cbDst <= 8);
1731 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1732
1733 /*
1734 * We might have a partial buffer match, deal with that first to make the
1735 * rest simpler. This is the first part of the cross page/buffer case.
1736 */
1737 if (pVCpu->iem.s.pbInstrBuf != NULL)
1738 {
1739 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1740 {
1741 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1742 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1743 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1744
1745 cbDst -= cbCopy;
1746 pvDst = (uint8_t *)pvDst + cbCopy;
1747 offBuf += cbCopy;
1748 pVCpu->iem.s.offInstrNextByte += offBuf;
1749 }
1750 }
1751
1752 /*
1753 * Check segment limit, figuring how much we're allowed to access at this point.
1754 *
1755 * We will fault immediately if RIP is past the segment limit / in non-canonical
1756 * territory. If we do continue, there are one or more bytes to read before we
1757 * end up in trouble and we need to do that first before faulting.
1758 */
1759 RTGCPTR GCPtrFirst;
1760 uint32_t cbMaxRead;
1761 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1762 {
1763 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1764 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1765 { /* likely */ }
1766 else
1767 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1768 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1769 }
1770 else
1771 {
1772 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1773 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1774 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1775 { /* likely */ }
1776 else
1777 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1778 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1779 if (cbMaxRead != 0)
1780 { /* likely */ }
1781 else
1782 {
1783 /* Overflowed because address is 0 and limit is max. */
1784 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1785 cbMaxRead = X86_PAGE_SIZE;
1786 }
1787 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1788 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1789 if (cbMaxRead2 < cbMaxRead)
1790 cbMaxRead = cbMaxRead2;
1791 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1792 }
1793
1794 /*
1795 * Get the TLB entry for this piece of code.
1796 */
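 /* The tag is the virtual page number merged with the code TLB revision;
    the low 8 bits of the page number pick the (direct mapped) entry. */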
1797 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1798 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1799 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1800 if (pTlbe->uTag == uTag)
1801 {
1802 /* likely when executing lots of code, otherwise unlikely */
1803# ifdef VBOX_WITH_STATISTICS
1804 pVCpu->iem.s.CodeTlb.cTlbHits++;
1805# endif
1806 }
1807 else
1808 {
1809 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1810# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1812 {
1813 pTlbe->uTag = uTag;
1814 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1815 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1816 pTlbe->GCPhys = NIL_RTGCPHYS;
1817 pTlbe->pbMappingR3 = NULL;
1818 }
1819 else
1820# endif
1821 {
1822 RTGCPHYS GCPhys;
1823 uint64_t fFlags;
1824 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1825 if (RT_FAILURE(rc))
1826 {
1827 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1828 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1829 }
1830
1831 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1832 pTlbe->uTag = uTag;
1833 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1834 pTlbe->GCPhys = GCPhys;
1835 pTlbe->pbMappingR3 = NULL;
1836 }
1837 }
1838
1839 /*
1840 * Check TLB page table level access flags.
1841 */
1842 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1843 {
1844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1845 {
1846 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1847 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1848 }
1849 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1850 {
1851 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1852 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1853 }
1854 }
1855
1856# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1857 /*
1858 * Allow interpretation of patch manager code blocks since they can for
1859 * instance throw #PFs for perfectly good reasons.
1860 */
1861 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1862 { /* likely */ }
1863 else
1864 {
1865 /** @todo Could optimize this a little in ring-3 if we liked. */
1866 size_t cbRead = 0;
1867 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1868 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1869 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1870 return;
1871 }
1872# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1873
1874 /*
1875 * Look up the physical page info if necessary.
1876 */
1877 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1878 { /* not necessary */ }
1879 else
1880 {
1881 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1882 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1883 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1884 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1885 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1886 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1887 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1888 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1889 }
1890
1891# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1892 /*
1893 * Try do a direct read using the pbMappingR3 pointer.
1894 */
1895 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1896 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1897 {
1898 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1899 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1900 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1901 {
1902 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1903 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1904 }
1905 else
1906 {
1907 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1908 Assert(cbInstr < cbMaxRead);
1909 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1910 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1911 }
1912 if (cbDst <= cbMaxRead)
1913 {
1914 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1915 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1916 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1917 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1918 return;
1919 }
1920 pVCpu->iem.s.pbInstrBuf = NULL;
1921
1922 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1923 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1924 }
1925 else
1926# endif
1927#if 0
1928 /*
1929 * If there is no special read handling, we can read a bit more and
1930 * put it in the prefetch buffer.
1931 */
1932 if ( cbDst < cbMaxRead
1933 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1934 {
1935 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1936 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1937 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1938 { /* likely */ }
1939 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1940 {
1941 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1942 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1943 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1944 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1945 }
1946 else
1947 {
1948 Log((RT_SUCCESS(rcStrict)
1949 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1950 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1951 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1952 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1953 }
1954 }
1955 /*
1956 * Special read handling, so only read exactly what's needed.
1957 * This is a highly unlikely scenario.
1958 */
1959 else
1960#endif
1961 {
1962 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1963 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1964 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1965 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1966 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1967 { /* likely */ }
1968 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1969 {
1970 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1971 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1972 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1973 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1974 }
1975 else
1976 {
1977 Log((RT_SUCCESS(rcStrict)
1978 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1979 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1980 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1981 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1982 }
1983 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1984 if (cbToRead == cbDst)
1985 return;
1986 }
1987
1988 /*
1989 * More to read, loop.
1990 */
1991 cbDst -= cbMaxRead;
1992 pvDst = (uint8_t *)pvDst + cbMaxRead;
1993 }
1994#else
1995 RT_NOREF(pvDst, cbDst);
1996 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1997#endif
1998}
1999
2000#else
2001
2002/**
2003 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2004 * exception if it fails.
2005 *
2006 * @returns Strict VBox status code.
2007 * @param pVCpu The cross context virtual CPU structure of the
2008 * calling thread.
2009 * @param cbMin The minimum number of bytes, relative to offOpcode,
2010 * that must be read.
2011 */
2012IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2013{
2014 /*
2015 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2016 *
2017 * First translate CS:rIP to a physical address.
2018 */
2019 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2020 uint32_t cbToTryRead;
2021 RTGCPTR GCPtrNext;
2022 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2023 {
2024 cbToTryRead = PAGE_SIZE;
2025 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2026 if (!IEM_IS_CANONICAL(GCPtrNext))
2027 return iemRaiseGeneralProtectionFault0(pVCpu);
2028 }
2029 else
2030 {
2031 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2032 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2033 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2034 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2035 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2036 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2037 if (!cbToTryRead) /* overflowed */
2038 {
2039 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2040 cbToTryRead = UINT32_MAX;
2041 /** @todo check out wrapping around the code segment. */
2042 }
2043 if (cbToTryRead < cbMin - cbLeft)
2044 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2045 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2046 }
2047
2048 /* Only read up to the end of the page, and make sure we don't read more
2049 than the opcode buffer can hold. */
2050 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2051 if (cbToTryRead > cbLeftOnPage)
2052 cbToTryRead = cbLeftOnPage;
2053 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2054 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
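 /* For instance, if GCPtrNext sits 3 bytes below a page boundary, cbToTryRead
    is clamped to 3 here; the assertion below then relies on those bytes being
    enough to complete the current instruction. */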
2055/** @todo r=bird: Convert assertion into undefined opcode exception? */
2056 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2057
2058# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2059 /* Allow interpretation of patch manager code blocks since they can for
2060 instance throw #PFs for perfectly good reasons. */
2061 if (pVCpu->iem.s.fInPatchCode)
2062 {
2063 size_t cbRead = 0;
2064 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2065 AssertRCReturn(rc, rc);
2066 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2067 return VINF_SUCCESS;
2068 }
2069# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2070
2071 RTGCPHYS GCPhys;
2072 uint64_t fFlags;
2073 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2074 if (RT_FAILURE(rc))
2075 {
2076 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2077 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2078 }
2079 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2080 {
2081 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2082 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2083 }
2084 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2085 {
2086 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2087 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2088 }
2089 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2090 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2091 /** @todo Check reserved bits and such stuff. PGM is better at doing
2092 * that, so do it when implementing the guest virtual address
2093 * TLB... */
2094
2095 /*
2096 * Read the bytes at this address.
2097 *
2098 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2099 * and since PATM should only patch the start of an instruction there
2100 * should be no need to check again here.
2101 */
2102 if (!pVCpu->iem.s.fBypassHandlers)
2103 {
2104 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2105 cbToTryRead, PGMACCESSORIGIN_IEM);
2106 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2107 { /* likely */ }
2108 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2109 {
2110 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2111 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2112 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2113 }
2114 else
2115 {
2116 Log((RT_SUCCESS(rcStrict)
2117 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2118 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2119 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2120 return rcStrict;
2121 }
2122 }
2123 else
2124 {
2125 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2126 if (RT_SUCCESS(rc))
2127 { /* likely */ }
2128 else
2129 {
2130 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2131 return rc;
2132 }
2133 }
2134 pVCpu->iem.s.cbOpcode += cbToTryRead;
2135 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2136
2137 return VINF_SUCCESS;
2138}
2139
2140#endif /* !IEM_WITH_CODE_TLB */
2141#ifndef IEM_WITH_SETJMP
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2145 *
2146 * @returns Strict VBox status code.
2147 * @param pVCpu The cross context virtual CPU structure of the
2148 * calling thread.
2149 * @param pb Where to return the opcode byte.
2150 */
2151DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2152{
2153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2154 if (rcStrict == VINF_SUCCESS)
2155 {
2156 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2157 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2158 pVCpu->iem.s.offOpcode = offOpcode + 1;
2159 }
2160 else
2161 *pb = 0;
2162 return rcStrict;
2163}
2164
2165
2166/**
2167 * Fetches the next opcode byte.
2168 *
2169 * @returns Strict VBox status code.
2170 * @param pVCpu The cross context virtual CPU structure of the
2171 * calling thread.
2172 * @param pu8 Where to return the opcode byte.
2173 */
2174DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2175{
2176 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2177 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2178 {
2179 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2180 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2181 return VINF_SUCCESS;
2182 }
2183 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2184}
2185
2186#else /* IEM_WITH_SETJMP */
2187
2188/**
2189 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2190 *
2191 * @returns The opcode byte.
2192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2193 */
2194DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2195{
2196# ifdef IEM_WITH_CODE_TLB
2197 uint8_t u8;
2198 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2199 return u8;
2200# else
2201 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2202 if (rcStrict == VINF_SUCCESS)
2203 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2204 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2205# endif
2206}
2207
2208
2209/**
2210 * Fetches the next opcode byte, longjmp on error.
2211 *
2212 * @returns The opcode byte.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 */
2215DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2216{
2217# ifdef IEM_WITH_CODE_TLB
2218 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2219 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2220 if (RT_LIKELY( pbBuf != NULL
2221 && offBuf < pVCpu->iem.s.cbInstrBuf))
2222 {
2223 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2224 return pbBuf[offBuf];
2225 }
2226# else
2227 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2228 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2229 {
2230 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2231 return pVCpu->iem.s.abOpcode[offOpcode];
2232 }
2233# endif
2234 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2235}
2236
2237#endif /* IEM_WITH_SETJMP */
2238
2239/**
2240 * Fetches the next opcode byte, returns automatically on failure.
2241 *
2242 * @param a_pu8 Where to return the opcode byte.
2243 * @remark Implicitly references pVCpu.
2244 */
2245#ifndef IEM_WITH_SETJMP
2246# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2247 do \
2248 { \
2249 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2250 if (rcStrict2 == VINF_SUCCESS) \
2251 { /* likely */ } \
2252 else \
2253 return rcStrict2; \
2254 } while (0)
2255#else
2256# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2257#endif /* IEM_WITH_SETJMP */
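/*
 * Typical use of the fetch macro in an opcode decoder (a minimal sketch only;
 * iemOp_ExampleImm8 is a made-up name): in the non-setjmp build the macro
 * returns from the caller on failure, so the enclosing function must return a
 * strict status code; in the setjmp build it longjmps instead.
 *
 *     IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm8(PVMCPU pVCpu)
 *     {
 *         uint8_t u8Imm;
 *         IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 *         ... use u8Imm ...
 *         return VINF_SUCCESS;
 *     }
 */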
2258
2259
2260#ifndef IEM_WITH_SETJMP
2261/**
2262 * Fetches the next signed byte from the opcode stream.
2263 *
2264 * @returns Strict VBox status code.
2265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2266 * @param pi8 Where to return the signed byte.
2267 */
2268DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2269{
2270 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2271}
2272#endif /* !IEM_WITH_SETJMP */
2273
2274
2275/**
2276 * Fetches the next signed byte from the opcode stream, returning automatically
2277 * on failure.
2278 *
2279 * @param a_pi8 Where to return the signed byte.
2280 * @remark Implicitly references pVCpu.
2281 */
2282#ifndef IEM_WITH_SETJMP
2283# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2284 do \
2285 { \
2286 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2287 if (rcStrict2 != VINF_SUCCESS) \
2288 return rcStrict2; \
2289 } while (0)
2290#else /* IEM_WITH_SETJMP */
2291# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2292
2293#endif /* IEM_WITH_SETJMP */
2294
2295#ifndef IEM_WITH_SETJMP
2296
2297/**
2298 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2299 *
2300 * @returns Strict VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2302 * @param pu16 Where to return the opcode word.
2303 */
2304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2305{
2306 uint8_t u8;
2307 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2308 if (rcStrict == VINF_SUCCESS)
2309 *pu16 = (int8_t)u8;
2310 return rcStrict;
2311}
2312
2313
2314/**
2315 * Fetches the next signed byte from the opcode stream, extending it to
2316 * unsigned 16-bit.
2317 *
2318 * @returns Strict VBox status code.
2319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2320 * @param pu16 Where to return the unsigned word.
2321 */
2322DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2323{
2324 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2325 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2326 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2327
2328 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2329 pVCpu->iem.s.offOpcode = offOpcode + 1;
2330 return VINF_SUCCESS;
2331}
2332
2333#endif /* !IEM_WITH_SETJMP */
2334
2335/**
2336 * Fetches the next signed byte from the opcode stream, sign-extending it to
2337 * a word and returning automatically on failure.
2338 *
2339 * @param a_pu16 Where to return the word.
2340 * @remark Implicitly references pVCpu.
2341 */
2342#ifndef IEM_WITH_SETJMP
2343# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2344 do \
2345 { \
2346 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2347 if (rcStrict2 != VINF_SUCCESS) \
2348 return rcStrict2; \
2349 } while (0)
2350#else
2351# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2352#endif
2353
2354#ifndef IEM_WITH_SETJMP
2355
2356/**
2357 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2358 *
2359 * @returns Strict VBox status code.
2360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2361 * @param pu32 Where to return the opcode dword.
2362 */
2363DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2364{
2365 uint8_t u8;
2366 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2367 if (rcStrict == VINF_SUCCESS)
2368 *pu32 = (int8_t)u8;
2369 return rcStrict;
2370}
2371
2372
2373/**
2374 * Fetches the next signed byte from the opcode stream, extending it to
2375 * unsigned 32-bit.
2376 *
2377 * @returns Strict VBox status code.
2378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2379 * @param pu32 Where to return the unsigned dword.
2380 */
2381DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2382{
2383 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2384 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2385 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2386
2387 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2388 pVCpu->iem.s.offOpcode = offOpcode + 1;
2389 return VINF_SUCCESS;
2390}
2391
2392#endif /* !IEM_WITH_SETJMP */
2393
2394/**
2395 * Fetches the next signed byte from the opcode stream, sign-extending it to
2396 * a double word and returning automatically on failure.
2397 *
2398 * @param a_pu32 Where to return the double word.
2399 * @remark Implicitly references pVCpu.
2400 */
2401#ifndef IEM_WITH_SETJMP
2402# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2403 do \
2404 { \
2405 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2406 if (rcStrict2 != VINF_SUCCESS) \
2407 return rcStrict2; \
2408 } while (0)
2409#else
2410# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2411#endif
2412
2413#ifndef IEM_WITH_SETJMP
2414
2415/**
2416 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2417 *
2418 * @returns Strict VBox status code.
2419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2420 * @param pu64 Where to return the opcode qword.
2421 */
2422DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2423{
2424 uint8_t u8;
2425 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2426 if (rcStrict == VINF_SUCCESS)
2427 *pu64 = (int8_t)u8;
2428 return rcStrict;
2429}
2430
2431
2432/**
2433 * Fetches the next signed byte from the opcode stream, extending it to
2434 * unsigned 64-bit.
2435 *
2436 * @returns Strict VBox status code.
2437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2438 * @param pu64 Where to return the unsigned qword.
2439 */
2440DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2441{
2442 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2443 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2444 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2445
2446 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2447 pVCpu->iem.s.offOpcode = offOpcode + 1;
2448 return VINF_SUCCESS;
2449}
2450
2451#endif /* !IEM_WITH_SETJMP */
2452
2453
2454/**
2455 * Fetches the next signed byte from the opcode stream, sign-extending it to
2456 * a quad word and returning automatically on failure.
2457 *
2458 * @param a_pu64 Where to return the quad word.
2459 * @remark Implicitly references pVCpu.
2460 */
2461#ifndef IEM_WITH_SETJMP
2462# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2463 do \
2464 { \
2465 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2466 if (rcStrict2 != VINF_SUCCESS) \
2467 return rcStrict2; \
2468 } while (0)
2469#else
2470# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2471#endif
2472
2473
2474#ifndef IEM_WITH_SETJMP
2475/**
2476 * Fetches the next opcode byte, noting down the position of the ModR/M byte.
2477 *
2478 * @returns Strict VBox status code.
2479 * @param pVCpu The cross context virtual CPU structure of the
2480 * calling thread.
2481 * @param pu8 Where to return the opcode byte.
2482 */
2483DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2484{
2485 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offModRm = offOpcode;
2487 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2488 {
2489 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2490 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2491 return VINF_SUCCESS;
2492 }
2493 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2494}
2495#else /* IEM_WITH_SETJMP */
2496/**
2497 * Fetches the next opcode byte, noting down the position of the ModR/M byte; longjmp on error.
2498 *
2499 * @returns The opcode byte.
2500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2501 */
2502DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2503{
2504# ifdef IEM_WITH_CODE_TLB
2505 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2506 pVCpu->iem.s.offModRm = offBuf;
2507 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2508 if (RT_LIKELY( pbBuf != NULL
2509 && offBuf < pVCpu->iem.s.cbInstrBuf))
2510 {
2511 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2512 return pbBuf[offBuf];
2513 }
2514# else
2515 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2516 pVCpu->iem.s.offModRm = offOpcode;
2517 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2518 {
2519 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2520 return pVCpu->iem.s.abOpcode[offOpcode];
2521 }
2522# endif
2523 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2524}
2525#endif /* IEM_WITH_SETJMP */
2526
2527/**
2528 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2529 * on failure.
2530 *
2531 * Will note down the position of the ModR/M byte for VT-x exits.
2532 *
2533 * @param a_pbRm Where to return the RM opcode byte.
2534 * @remark Implicitly references pVCpu.
2535 */
2536#ifndef IEM_WITH_SETJMP
2537# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2538 do \
2539 { \
2540 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2541 if (rcStrict2 == VINF_SUCCESS) \
2542 { /* likely */ } \
2543 else \
2544 return rcStrict2; \
2545 } while (0)
2546#else
2547# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2548#endif /* IEM_WITH_SETJMP */
2549
2550
2551#ifndef IEM_WITH_SETJMP
2552
2553/**
2554 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2555 *
2556 * @returns Strict VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2558 * @param pu16 Where to return the opcode word.
2559 */
2560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2561{
2562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2563 if (rcStrict == VINF_SUCCESS)
2564 {
2565 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2566# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2567 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2568# else
2569 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2570# endif
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu16 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu16 Where to return the opcode word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2587{
2588 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2590 {
2591 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2592# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2593 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2594# else
2595 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2596# endif
2597 return VINF_SUCCESS;
2598 }
2599 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2600}
2601
2602#else /* IEM_WITH_SETJMP */
2603
2604/**
2605 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2606 *
2607 * @returns The opcode word.
2608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2609 */
2610DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2611{
2612# ifdef IEM_WITH_CODE_TLB
2613 uint16_t u16;
2614 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2615 return u16;
2616# else
2617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2618 if (rcStrict == VINF_SUCCESS)
2619 {
2620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2621 pVCpu->iem.s.offOpcode += 2;
2622# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2623 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2624# else
2625 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2626# endif
2627 }
2628 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2629# endif
2630}
2631
2632
2633/**
2634 * Fetches the next opcode word, longjmp on error.
2635 *
2636 * @returns The opcode word.
2637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2638 */
2639DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2640{
2641# ifdef IEM_WITH_CODE_TLB
2642 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2643 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2644 if (RT_LIKELY( pbBuf != NULL
2645 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2646 {
2647 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2649 return *(uint16_t const *)&pbBuf[offBuf];
2650# else
2651 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2652# endif
2653 }
2654# else
2655 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2656 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2657 {
2658 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2659# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2660 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2661# else
2662 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2663# endif
2664 }
2665# endif
2666 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2667}
2668
2669#endif /* IEM_WITH_SETJMP */
2670
2671
2672/**
2673 * Fetches the next opcode word, returns automatically on failure.
2674 *
2675 * @param a_pu16 Where to return the opcode word.
2676 * @remark Implicitly references pVCpu.
2677 */
2678#ifndef IEM_WITH_SETJMP
2679# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2680 do \
2681 { \
2682 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2683 if (rcStrict2 != VINF_SUCCESS) \
2684 return rcStrict2; \
2685 } while (0)
2686#else
2687# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2688#endif
2689
2690#ifndef IEM_WITH_SETJMP
2691
2692/**
2693 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2694 *
2695 * @returns Strict VBox status code.
2696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2697 * @param pu32 Where to return the opcode double word.
2698 */
2699DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2700{
2701 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2702 if (rcStrict == VINF_SUCCESS)
2703 {
2704 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2705 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2706 pVCpu->iem.s.offOpcode = offOpcode + 2;
2707 }
2708 else
2709 *pu32 = 0;
2710 return rcStrict;
2711}
2712
2713
2714/**
2715 * Fetches the next opcode word, zero extending it to a double word.
2716 *
2717 * @returns Strict VBox status code.
2718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2719 * @param pu32 Where to return the opcode double word.
2720 */
2721DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2722{
2723 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2724 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2725 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2726
2727 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2728 pVCpu->iem.s.offOpcode = offOpcode + 2;
2729 return VINF_SUCCESS;
2730}
2731
2732#endif /* !IEM_WITH_SETJMP */
2733
2734
2735/**
2736 * Fetches the next opcode word and zero extends it to a double word, returns
2737 * automatically on failure.
2738 *
2739 * @param a_pu32 Where to return the opcode double word.
2740 * @remark Implicitly references pVCpu.
2741 */
2742#ifndef IEM_WITH_SETJMP
2743# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2744 do \
2745 { \
2746 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2747 if (rcStrict2 != VINF_SUCCESS) \
2748 return rcStrict2; \
2749 } while (0)
2750#else
2751# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2752#endif
2753
2754#ifndef IEM_WITH_SETJMP
2755
2756/**
2757 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2758 *
2759 * @returns Strict VBox status code.
2760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2761 * @param pu64 Where to return the opcode quad word.
2762 */
2763DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2764{
2765 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2766 if (rcStrict == VINF_SUCCESS)
2767 {
2768 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2769 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2770 pVCpu->iem.s.offOpcode = offOpcode + 2;
2771 }
2772 else
2773 *pu64 = 0;
2774 return rcStrict;
2775}
2776
2777
2778/**
2779 * Fetches the next opcode word, zero extending it to a quad word.
2780 *
2781 * @returns Strict VBox status code.
2782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2783 * @param pu64 Where to return the opcode quad word.
2784 */
2785DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2786{
2787 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2788 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2789 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2790
2791 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2792 pVCpu->iem.s.offOpcode = offOpcode + 2;
2793 return VINF_SUCCESS;
2794}
2795
2796#endif /* !IEM_WITH_SETJMP */
2797
2798/**
2799 * Fetches the next opcode word and zero extends it to a quad word, returns
2800 * automatically on failure.
2801 *
2802 * @param a_pu64 Where to return the opcode quad word.
2803 * @remark Implicitly references pVCpu.
2804 */
2805#ifndef IEM_WITH_SETJMP
2806# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2807 do \
2808 { \
2809 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2810 if (rcStrict2 != VINF_SUCCESS) \
2811 return rcStrict2; \
2812 } while (0)
2813#else
2814# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2815#endif
2816
2817
2818#ifndef IEM_WITH_SETJMP
2819/**
2820 * Fetches the next signed word from the opcode stream.
2821 *
2822 * @returns Strict VBox status code.
2823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2824 * @param pi16 Where to return the signed word.
2825 */
2826DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2827{
2828 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2829}
2830#endif /* !IEM_WITH_SETJMP */
2831
2832
2833/**
2834 * Fetches the next signed word from the opcode stream, returning automatically
2835 * on failure.
2836 *
2837 * @param a_pi16 Where to return the signed word.
2838 * @remark Implicitly references pVCpu.
2839 */
2840#ifndef IEM_WITH_SETJMP
2841# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2842 do \
2843 { \
2844 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2845 if (rcStrict2 != VINF_SUCCESS) \
2846 return rcStrict2; \
2847 } while (0)
2848#else
2849# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2850#endif
2851
2852#ifndef IEM_WITH_SETJMP
2853
2854/**
2855 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2856 *
2857 * @returns Strict VBox status code.
2858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2859 * @param pu32 Where to return the opcode dword.
2860 */
2861DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2862{
2863 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2864 if (rcStrict == VINF_SUCCESS)
2865 {
2866 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2867# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2868 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2869# else
2870 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2871 pVCpu->iem.s.abOpcode[offOpcode + 1],
2872 pVCpu->iem.s.abOpcode[offOpcode + 2],
2873 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2874# endif
2875 pVCpu->iem.s.offOpcode = offOpcode + 4;
2876 }
2877 else
2878 *pu32 = 0;
2879 return rcStrict;
2880}
2881
2882
2883/**
2884 * Fetches the next opcode dword.
2885 *
2886 * @returns Strict VBox status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2888 * @param pu32 Where to return the opcode double word.
2889 */
2890DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2891{
2892 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2893 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2894 {
2895 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2896# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2897 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2898# else
2899 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2900 pVCpu->iem.s.abOpcode[offOpcode + 1],
2901 pVCpu->iem.s.abOpcode[offOpcode + 2],
2902 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2903# endif
2904 return VINF_SUCCESS;
2905 }
2906 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2907}
2908
2909#else /* IEM_WITH_SETJMP */
2910
2911/**
2912 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2913 *
2914 * @returns The opcode dword.
2915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2916 */
2917DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2918{
2919# ifdef IEM_WITH_CODE_TLB
2920 uint32_t u32;
2921 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2922 return u32;
2923# else
2924 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2925 if (rcStrict == VINF_SUCCESS)
2926 {
2927 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2928 pVCpu->iem.s.offOpcode = offOpcode + 4;
2929# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2930 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2931# else
2932 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2933 pVCpu->iem.s.abOpcode[offOpcode + 1],
2934 pVCpu->iem.s.abOpcode[offOpcode + 2],
2935 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2936# endif
2937 }
2938 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2939# endif
2940}
2941
2942
2943/**
2944 * Fetches the next opcode dword, longjmp on error.
2945 *
2946 * @returns The opcode dword.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 */
2949DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2950{
2951# ifdef IEM_WITH_CODE_TLB
2952 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2953 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2954 if (RT_LIKELY( pbBuf != NULL
2955 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2956 {
2957 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2958# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2959 return *(uint32_t const *)&pbBuf[offBuf];
2960# else
2961 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2962 pbBuf[offBuf + 1],
2963 pbBuf[offBuf + 2],
2964 pbBuf[offBuf + 3]);
2965# endif
2966 }
2967# else
2968 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2969 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2970 {
2971 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2972# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2973 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2974# else
2975 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2976 pVCpu->iem.s.abOpcode[offOpcode + 1],
2977 pVCpu->iem.s.abOpcode[offOpcode + 2],
2978 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2979# endif
2980 }
2981# endif
2982 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2983}
2984
2985#endif /* !IEM_WITH_SETJMP */
2986
2987
2988/**
2989 * Fetches the next opcode dword, returns automatically on failure.
2990 *
2991 * @param a_pu32 Where to return the opcode dword.
2992 * @remark Implicitly references pVCpu.
2993 */
2994#ifndef IEM_WITH_SETJMP
2995# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2996 do \
2997 { \
2998 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2999 if (rcStrict2 != VINF_SUCCESS) \
3000 return rcStrict2; \
3001 } while (0)
3002#else
3003# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3004#endif
3005
3006#ifndef IEM_WITH_SETJMP
3007
3008/**
3009 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3010 *
3011 * @returns Strict VBox status code.
3012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3013 * @param pu64 Where to return the opcode quad word.
3014 */
3015DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3016{
3017 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3018 if (rcStrict == VINF_SUCCESS)
3019 {
3020 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3021 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3022 pVCpu->iem.s.abOpcode[offOpcode + 1],
3023 pVCpu->iem.s.abOpcode[offOpcode + 2],
3024 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3025 pVCpu->iem.s.offOpcode = offOpcode + 4;
3026 }
3027 else
3028 *pu64 = 0;
3029 return rcStrict;
3030}
3031
3032
3033/**
3034 * Fetches the next opcode dword, zero extending it to a quad word.
3035 *
3036 * @returns Strict VBox status code.
3037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3038 * @param pu64 Where to return the opcode quad word.
3039 */
3040DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3041{
3042 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3043 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3044 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3045
3046 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3047 pVCpu->iem.s.abOpcode[offOpcode + 1],
3048 pVCpu->iem.s.abOpcode[offOpcode + 2],
3049 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3050 pVCpu->iem.s.offOpcode = offOpcode + 4;
3051 return VINF_SUCCESS;
3052}
3053
3054#endif /* !IEM_WITH_SETJMP */
3055
3056
3057/**
3058 * Fetches the next opcode dword and zero extends it to a quad word, returns
3059 * automatically on failure.
3060 *
3061 * @param a_pu64 Where to return the opcode quad word.
3062 * @remark Implicitly references pVCpu.
3063 */
3064#ifndef IEM_WITH_SETJMP
3065# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3066 do \
3067 { \
3068 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3069 if (rcStrict2 != VINF_SUCCESS) \
3070 return rcStrict2; \
3071 } while (0)
3072#else
3073# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3074#endif
3075
3076
3077#ifndef IEM_WITH_SETJMP
3078/**
3079 * Fetches the next signed double word from the opcode stream.
3080 *
3081 * @returns Strict VBox status code.
3082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3083 * @param pi32 Where to return the signed double word.
3084 */
3085DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3086{
3087 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3088}
3089#endif
3090
3091/**
3092 * Fetches the next signed double word from the opcode stream, returning
3093 * automatically on failure.
3094 *
3095 * @param a_pi32 Where to return the signed double word.
3096 * @remark Implicitly references pVCpu.
3097 */
3098#ifndef IEM_WITH_SETJMP
3099# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3100 do \
3101 { \
3102 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3103 if (rcStrict2 != VINF_SUCCESS) \
3104 return rcStrict2; \
3105 } while (0)
3106#else
3107# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3108#endif
3109
3110#ifndef IEM_WITH_SETJMP
3111
3112/**
3113 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3114 *
3115 * @returns Strict VBox status code.
3116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3117 * @param pu64 Where to return the opcode qword.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3120{
3121 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3122 if (rcStrict == VINF_SUCCESS)
3123 {
3124 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3125 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3126 pVCpu->iem.s.abOpcode[offOpcode + 1],
3127 pVCpu->iem.s.abOpcode[offOpcode + 2],
3128 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3129 pVCpu->iem.s.offOpcode = offOpcode + 4;
3130 }
3131 else
3132 *pu64 = 0;
3133 return rcStrict;
3134}
3135
3136
3137/**
3138 * Fetches the next opcode dword, sign extending it into a quad word.
3139 *
3140 * @returns Strict VBox status code.
3141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3142 * @param pu64 Where to return the opcode quad word.
3143 */
3144DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3145{
3146 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3147 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3148 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3149
3150 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3151 pVCpu->iem.s.abOpcode[offOpcode + 1],
3152 pVCpu->iem.s.abOpcode[offOpcode + 2],
3153 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3154 *pu64 = i32;
3155 pVCpu->iem.s.offOpcode = offOpcode + 4;
3156 return VINF_SUCCESS;
3157}
3158
3159#endif /* !IEM_WITH_SETJMP */
3160
3161
3162/**
3163 * Fetches the next opcode double word and sign extends it to a quad word,
3164 * returns automatically on failure.
3165 *
3166 * @param a_pu64 Where to return the opcode quad word.
3167 * @remark Implicitly references pVCpu.
3168 */
3169#ifndef IEM_WITH_SETJMP
3170# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3171 do \
3172 { \
3173 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3174 if (rcStrict2 != VINF_SUCCESS) \
3175 return rcStrict2; \
3176 } while (0)
3177#else
3178# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3179#endif
3180
3181#ifndef IEM_WITH_SETJMP
3182
3183/**
3184 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3185 *
3186 * @returns Strict VBox status code.
3187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3188 * @param pu64 Where to return the opcode qword.
3189 */
3190DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3191{
3192 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3193 if (rcStrict == VINF_SUCCESS)
3194 {
3195 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3196# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3197 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3198# else
3199 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3200 pVCpu->iem.s.abOpcode[offOpcode + 1],
3201 pVCpu->iem.s.abOpcode[offOpcode + 2],
3202 pVCpu->iem.s.abOpcode[offOpcode + 3],
3203 pVCpu->iem.s.abOpcode[offOpcode + 4],
3204 pVCpu->iem.s.abOpcode[offOpcode + 5],
3205 pVCpu->iem.s.abOpcode[offOpcode + 6],
3206 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3207# endif
3208 pVCpu->iem.s.offOpcode = offOpcode + 8;
3209 }
3210 else
3211 *pu64 = 0;
3212 return rcStrict;
3213}
3214
3215
3216/**
3217 * Fetches the next opcode qword.
3218 *
3219 * @returns Strict VBox status code.
3220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3221 * @param pu64 Where to return the opcode qword.
3222 */
3223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3224{
3225 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3226 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3227 {
3228# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3229 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3230# else
3231 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3232 pVCpu->iem.s.abOpcode[offOpcode + 1],
3233 pVCpu->iem.s.abOpcode[offOpcode + 2],
3234 pVCpu->iem.s.abOpcode[offOpcode + 3],
3235 pVCpu->iem.s.abOpcode[offOpcode + 4],
3236 pVCpu->iem.s.abOpcode[offOpcode + 5],
3237 pVCpu->iem.s.abOpcode[offOpcode + 6],
3238 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3239# endif
3240 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3241 return VINF_SUCCESS;
3242 }
3243 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3244}
3245
3246#else /* IEM_WITH_SETJMP */
3247
3248/**
3249 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3250 *
3251 * @returns The opcode qword.
3252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3253 */
3254DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3255{
3256# ifdef IEM_WITH_CODE_TLB
3257 uint64_t u64;
3258 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3259 return u64;
3260# else
3261 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3262 if (rcStrict == VINF_SUCCESS)
3263 {
3264 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3265 pVCpu->iem.s.offOpcode = offOpcode + 8;
3266# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3267 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3268# else
3269 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3270 pVCpu->iem.s.abOpcode[offOpcode + 1],
3271 pVCpu->iem.s.abOpcode[offOpcode + 2],
3272 pVCpu->iem.s.abOpcode[offOpcode + 3],
3273 pVCpu->iem.s.abOpcode[offOpcode + 4],
3274 pVCpu->iem.s.abOpcode[offOpcode + 5],
3275 pVCpu->iem.s.abOpcode[offOpcode + 6],
3276 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3277# endif
3278 }
3279 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3280# endif
3281}
3282
3283
3284/**
3285 * Fetches the next opcode qword, longjmp on error.
3286 *
3287 * @returns The opcode qword.
3288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3289 */
3290DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3291{
3292# ifdef IEM_WITH_CODE_TLB
3293 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3294 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3295 if (RT_LIKELY( pbBuf != NULL
3296 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3297 {
3298 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3299# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3300 return *(uint64_t const *)&pbBuf[offBuf];
3301# else
3302 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3303 pbBuf[offBuf + 1],
3304 pbBuf[offBuf + 2],
3305 pbBuf[offBuf + 3],
3306 pbBuf[offBuf + 4],
3307 pbBuf[offBuf + 5],
3308 pbBuf[offBuf + 6],
3309 pbBuf[offBuf + 7]);
3310# endif
3311 }
3312# else
3313 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3314 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3315 {
3316 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3317# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3318 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3319# else
3320 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3321 pVCpu->iem.s.abOpcode[offOpcode + 1],
3322 pVCpu->iem.s.abOpcode[offOpcode + 2],
3323 pVCpu->iem.s.abOpcode[offOpcode + 3],
3324 pVCpu->iem.s.abOpcode[offOpcode + 4],
3325 pVCpu->iem.s.abOpcode[offOpcode + 5],
3326 pVCpu->iem.s.abOpcode[offOpcode + 6],
3327 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3328# endif
3329 }
3330# endif
3331 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3332}
3333
3334#endif /* IEM_WITH_SETJMP */
3335
3336/**
3337 * Fetches the next opcode quad word, returns automatically on failure.
3338 *
3339 * @param a_pu64 Where to return the opcode quad word.
3340 * @remark Implicitly references pVCpu.
3341 */
3342#ifndef IEM_WITH_SETJMP
3343# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3344 do \
3345 { \
3346 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3347 if (rcStrict2 != VINF_SUCCESS) \
3348 return rcStrict2; \
3349 } while (0)
3350#else
3351# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3352#endif
3353
3354
3355/** @name Misc Worker Functions.
3356 * @{
3357 */
3358
3359/**
3360 * Gets the exception class for the specified exception vector.
3361 *
3362 * @returns The class of the specified exception.
3363 * @param uVector The exception vector.
3364 */
3365IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3366{
3367 Assert(uVector <= X86_XCPT_LAST);
3368 switch (uVector)
3369 {
3370 case X86_XCPT_DE:
3371 case X86_XCPT_TS:
3372 case X86_XCPT_NP:
3373 case X86_XCPT_SS:
3374 case X86_XCPT_GP:
3375 case X86_XCPT_SX: /* AMD only */
3376 return IEMXCPTCLASS_CONTRIBUTORY;
3377
3378 case X86_XCPT_PF:
3379 case X86_XCPT_VE: /* Intel only */
3380 return IEMXCPTCLASS_PAGE_FAULT;
3381
3382 case X86_XCPT_DF:
3383 return IEMXCPTCLASS_DOUBLE_FAULT;
3384 }
3385 return IEMXCPTCLASS_BENIGN;
3386}
3387
3388
3389/**
3390 * Evaluates how to handle an exception caused during delivery of another event
3391 * (exception / interrupt).
3392 *
3393 * @returns How to handle the recursive exception.
3394 * @param pVCpu The cross context virtual CPU structure of the
3395 * calling thread.
3396 * @param fPrevFlags The flags of the previous event.
3397 * @param uPrevVector The vector of the previous event.
3398 * @param fCurFlags The flags of the current exception.
3399 * @param uCurVector The vector of the current exception.
3400 * @param pfXcptRaiseInfo Where to store additional information about the
3401 * exception condition. Optional.
3402 */
3403VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3404 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3405{
3406 /*
3407 * Only CPU exceptions can be raised while delivering other events, software interrupt
3408 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3409 */
3410 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3411 Assert(pVCpu); RT_NOREF(pVCpu);
3412 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3413
3414 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3415 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3416 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3417 {
3418 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3419 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3420 {
3421 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3422 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3423 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3424 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3425 {
3426 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3427 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3428 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3429 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3430 uCurVector, pVCpu->cpum.GstCtx.cr2));
3431 }
3432 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3433 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3434 {
3435 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3436 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3437 }
3438 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3439 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3440 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3441 {
3442 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3443 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3444 }
3445 }
3446 else
3447 {
3448 if (uPrevVector == X86_XCPT_NMI)
3449 {
3450 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3451 if (uCurVector == X86_XCPT_PF)
3452 {
3453 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3454 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3455 }
3456 }
3457 else if ( uPrevVector == X86_XCPT_AC
3458 && uCurVector == X86_XCPT_AC)
3459 {
3460 enmRaise = IEMXCPTRAISE_CPU_HANG;
3461 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3462 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3463 }
3464 }
3465 }
3466 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3467 {
3468 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3469 if (uCurVector == X86_XCPT_PF)
3470 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3471 }
3472 else
3473 {
3474 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3475 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3476 }
3477
3478 if (pfXcptRaiseInfo)
3479 *pfXcptRaiseInfo = fRaiseInfo;
3480 return enmRaise;
3481}
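
/*
 * Illustrative sketch only (hypothetical helper, not used by IEM): one worked
 * case of the classification above -- a #GP raised while delivering a #PF is
 * upgraded to a double fault with page-fault related raise info.
 */
#if 0
static void iemExampleRecursiveXcpt(PVMCPU pVCpu)
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE    enmRaise    = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous event */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                           &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
}
#endif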
3482
3483
3484/**
3485 * Enters the CPU shutdown state initiated by a triple fault or other
3486 * unrecoverable conditions.
3487 *
3488 * @returns Strict VBox status code.
3489 * @param pVCpu The cross context virtual CPU structure of the
3490 * calling thread.
3491 */
3492IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3493{
3494 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3495 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3496
3497 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3498 {
3499 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3500 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3501 }
3502
3503 RT_NOREF(pVCpu);
3504 return VINF_EM_TRIPLE_FAULT;
3505}
3506
3507
3508/**
3509 * Validates a new SS segment.
3510 *
3511 * @returns VBox strict status code.
3512 * @param pVCpu The cross context virtual CPU structure of the
3513 * calling thread.
3514 * @param NewSS The new SS selector.
3515 * @param uCpl The CPL to load the stack for.
3516 * @param pDesc Where to return the descriptor.
3517 */
3518IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3519{
3520 /* Null selectors are not allowed (we're not called for dispatching
3521 interrupts with SS=0 in long mode). */
3522 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3523 {
3524 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3525 return iemRaiseTaskSwitchFault0(pVCpu);
3526 }
3527
3528 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3529 if ((NewSS & X86_SEL_RPL) != uCpl)
3530 {
3531 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3532 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3533 }
3534
3535 /*
3536 * Read the descriptor.
3537 */
3538 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3539 if (rcStrict != VINF_SUCCESS)
3540 return rcStrict;
3541
3542 /*
3543 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3544 */
3545 if (!pDesc->Legacy.Gen.u1DescType)
3546 {
3547 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3548 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3549 }
3550
3551 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3552 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3553 {
3554 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3555 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3556 }
3557 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3558 {
3559 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3560 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3561 }
3562
3563 /* Is it there? */
3564 /** @todo testcase: Is this checked before the canonical / limit check below? */
3565 if (!pDesc->Legacy.Gen.u1Present)
3566 {
3567 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3568 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3569 }
3570
3571 return VINF_SUCCESS;
3572}
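
/*
 * Illustrative sketch only (hypothetical caller, not IEM code): typical use of
 * the SS validation helper above.  On failure the helper has already raised
 * the appropriate #TS/#NP, so the caller just propagates the status.
 */
#if 0
static VBOXSTRICTRC iemExampleValidateSS(PVMCPU pVCpu, RTSEL SelSS, uint8_t uCpl)
{
    IEMSELDESC   DescSS;
    VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, SelSS, uCpl, &DescSS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ...commit DescSS into the hidden SS register parts here... */
    return VINF_SUCCESS;
}
#endif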
3573
3574
3575/**
3576 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3577 * not.
3578 *
3579 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3580 */
3581#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3582# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3583#else
3584# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3585#endif
3586
3587/**
3588 * Updates the EFLAGS in the correct manner wrt. PATM.
3589 *
3590 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3591 * @param a_fEfl The new EFLAGS.
3592 */
3593#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3594# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3595#else
3596# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3597#endif
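
/*
 * Illustrative sketch only (hypothetical helper): the two macros above are
 * meant to be used as a read-modify-write pair so that raw-mode (PATM) kept
 * flags are handled transparently.
 */
#if 0
DECLINLINE(void) iemExampleClearIfTf(PVMCPU pVCpu)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    fEfl &= ~(X86_EFL_IF | X86_EFL_TF);
    IEMMISC_SET_EFL(pVCpu, fEfl);
}
#endif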
3598
3599
3600/** @} */
3601
3602/** @name Raising Exceptions.
3603 *
3604 * @{
3605 */
3606
3607
3608/**
3609 * Loads the specified stack far pointer from the TSS.
3610 *
3611 * @returns VBox strict status code.
3612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3613 * @param uCpl The CPL to load the stack for.
3614 * @param pSelSS Where to return the new stack segment.
3615 * @param puEsp Where to return the new stack pointer.
3616 */
3617IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3618{
3619 VBOXSTRICTRC rcStrict;
3620 Assert(uCpl < 4);
3621
3622 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3623 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3624 {
3625 /*
3626 * 16-bit TSS (X86TSS16).
3627 */
3628 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3629 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3630 {
3631 uint32_t off = uCpl * 4 + 2;
3632 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3633 {
3634 /** @todo check actual access pattern here. */
3635 uint32_t u32Tmp = 0; /* gcc maybe... */
3636 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3637 if (rcStrict == VINF_SUCCESS)
3638 {
3639 *puEsp = RT_LOWORD(u32Tmp);
3640 *pSelSS = RT_HIWORD(u32Tmp);
3641 return VINF_SUCCESS;
3642 }
3643 }
3644 else
3645 {
3646 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3647 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3648 }
3649 break;
3650 }
3651
3652 /*
3653 * 32-bit TSS (X86TSS32).
3654 */
3655 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3656 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3657 {
3658 uint32_t off = uCpl * 8 + 4;
3659 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3660 {
3661/** @todo check actual access pattern here. */
3662 uint64_t u64Tmp;
3663 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3664 if (rcStrict == VINF_SUCCESS)
3665 {
3666 *puEsp = u64Tmp & UINT32_MAX;
3667 *pSelSS = (RTSEL)(u64Tmp >> 32);
3668 return VINF_SUCCESS;
3669 }
3670 }
3671 else
3672 {
3673 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3674 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3675 }
3676 break;
3677 }
3678
3679 default:
3680 AssertFailed();
3681 rcStrict = VERR_IEM_IPE_4;
3682 break;
3683 }
3684
3685 *puEsp = 0; /* make gcc happy */
3686 *pSelSS = 0; /* make gcc happy */
3687 return rcStrict;
3688}
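
/*
 * Illustrative sketch only (hypothetical helper): the worked offset arithmetic
 * used by the two TSS lookups above, mirroring the hardware TSS layouts.
 */
#if 0
static uint32_t iemExampleTssStackFieldOff(bool fTss32, uint8_t uCpl)
{
    /* 16-bit TSS: SP:SS pairs start at offset 2, 4 bytes each  -> CPL 1 reads at 6.
       32-bit TSS: ESP+SS entries start at offset 4, 8 bytes each -> CPL 1 reads at 12. */
    return fTss32 ? (uint32_t)uCpl * 8 + 4 : (uint32_t)uCpl * 4 + 2;
}
#endif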
3689
3690
3691/**
3692 * Loads the specified stack pointer from the 64-bit TSS.
3693 *
3694 * @returns VBox strict status code.
3695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3696 * @param uCpl The CPL to load the stack for.
3697 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3698 * @param puRsp Where to return the new stack pointer.
3699 */
3700IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3701{
3702 Assert(uCpl < 4);
3703 Assert(uIst < 8);
3704 *puRsp = 0; /* make gcc happy */
3705
3706 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3707 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3708
3709 uint32_t off;
3710 if (uIst)
3711 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3712 else
3713 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3714 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3715 {
3716 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3717 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3718 }
3719
3720 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3721}
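
/*
 * Illustrative sketch only: worked offsets for the formula above, assuming the
 * X86TSS64 structure mirrors the hardware 64-bit TSS layout (RSP0 at byte 4,
 * IST1 at byte 36).
 */
#if 0
AssertCompile(RT_UOFFSETOF(X86TSS64, rsp0) == 4);  /* uCpl=2, uIst=0 -> off = 2*8 + 4  = 20 (rsp2) */
AssertCompile(RT_UOFFSETOF(X86TSS64, ist1) == 36); /* uIst=3         -> off = 2*8 + 36 = 52 (ist3) */
#endif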
3722
3723
3724/**
3725 * Adjust the CPU state according to the exception being raised.
3726 *
3727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3728 * @param u8Vector The exception that has been raised.
3729 */
3730DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3731{
3732 switch (u8Vector)
3733 {
3734 case X86_XCPT_DB:
3735 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3736 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3737 break;
3738 /** @todo Read the AMD and Intel exception reference... */
3739 }
3740}
3741
3742
3743/**
3744 * Implements exceptions and interrupts for real mode.
3745 *
3746 * @returns VBox strict status code.
3747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3748 * @param cbInstr The number of bytes to offset rIP by in the return
3749 * address.
3750 * @param u8Vector The interrupt / exception vector number.
3751 * @param fFlags The flags.
3752 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3753 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3754 */
3755IEM_STATIC VBOXSTRICTRC
3756iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3757 uint8_t cbInstr,
3758 uint8_t u8Vector,
3759 uint32_t fFlags,
3760 uint16_t uErr,
3761 uint64_t uCr2)
3762{
3763 NOREF(uErr); NOREF(uCr2);
3764 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3765
3766 /*
3767 * Read the IDT entry.
3768 */
3769 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3770 {
3771 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3772 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3773 }
3774 RTFAR16 Idte;
3775 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3776 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3777 {
3778 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3779 return rcStrict;
3780 }
3781
3782 /*
3783 * Push the stack frame.
3784 */
3785 uint16_t *pu16Frame;
3786 uint64_t uNewRsp;
3787 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3788 if (rcStrict != VINF_SUCCESS)
3789 return rcStrict;
3790
3791 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3792#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3793 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3794 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3795 fEfl |= UINT16_C(0xf000);
3796#endif
3797 pu16Frame[2] = (uint16_t)fEfl;
3798 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3799 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3800 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3801 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3802 return rcStrict;
3803
3804 /*
3805 * Load the vector address into cs:ip and make exception specific state
3806 * adjustments.
3807 */
3808 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3809 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3810 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3811 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3812 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3813 pVCpu->cpum.GstCtx.rip = Idte.off;
3814 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3815 IEMMISC_SET_EFL(pVCpu, fEfl);
3816
3817 /** @todo do we actually do this in real mode? */
3818 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3819 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3820
3821 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3822}
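
/*
 * Illustrative sketch only (hypothetical helper): the real-mode IVT lookup the
 * code above performs -- each entry is a 4 byte offset:segment pair, and the
 * pushed frame is IP, CS, FLAGS (6 bytes, lowest address first).
 */
#if 0
DECLINLINE(RTGCPTR) iemExampleRealModeIdteAddr(PVMCPU pVCpu, uint8_t u8Vector)
{
    return pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector;
}
#endif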
3823
3824
3825/**
3826 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3827 *
3828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3829 * @param pSReg Pointer to the segment register.
3830 */
3831IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3832{
3833 pSReg->Sel = 0;
3834 pSReg->ValidSel = 0;
3835 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3836 {
3837 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3838 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3839 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3840 }
3841 else
3842 {
3843 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3844 /** @todo check this on AMD-V */
3845 pSReg->u64Base = 0;
3846 pSReg->u32Limit = 0;
3847 }
3848}
3849
3850
3851/**
3852 * Loads a segment selector during a task switch in V8086 mode.
3853 *
3854 * @param pSReg Pointer to the segment register.
3855 * @param uSel The selector value to load.
3856 */
3857IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3858{
3859 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3860 pSReg->Sel = uSel;
3861 pSReg->ValidSel = uSel;
3862 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3863 pSReg->u64Base = uSel << 4;
3864 pSReg->u32Limit = 0xffff;
3865 pSReg->Attr.u = 0xf3;
3866}
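
/*
 * Illustrative sketch only (hypothetical check): what the V86 helper above
 * produces for an arbitrary selector value.
 */
#if 0
static void iemExampleV86Selector(void)
{
    CPUMSELREG SReg;
    iemHlpLoadSelectorInV86Mode(&SReg, 0x1234);
    Assert(SReg.u64Base  == UINT64_C(0x12340)); /* selector << 4 */
    Assert(SReg.u32Limit == UINT32_C(0xffff));
    Assert(SReg.Attr.u   == 0xf3);              /* present, DPL=3, accessed read/write data */
}
#endif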
3867
3868
3869/**
3870 * Loads a NULL data selector into a selector register, both the hidden and
3871 * visible parts, in protected mode.
3872 *
3873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3874 * @param pSReg Pointer to the segment register.
3875 * @param uRpl The RPL.
3876 */
3877IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3878{
3879 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3880 * data selector in protected mode. */
3881 pSReg->Sel = uRpl;
3882 pSReg->ValidSel = uRpl;
3883 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3884 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3885 {
3886 /* VT-x (Intel 3960x) observed doing something like this. */
3887 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3888 pSReg->u32Limit = UINT32_MAX;
3889 pSReg->u64Base = 0;
3890 }
3891 else
3892 {
3893 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3894 pSReg->u32Limit = 0;
3895 pSReg->u64Base = 0;
3896 }
3897}
3898
3899
3900/**
3901 * Loads a segment selector during a task switch in protected mode.
3902 *
3903 * In this task switch scenario, we would throw \#TS exceptions rather than
3904 * \#GPs.
3905 *
3906 * @returns VBox strict status code.
3907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3908 * @param pSReg Pointer to the segment register.
3909 * @param uSel The new selector value.
3910 *
3911 * @remarks This does _not_ handle CS or SS.
3912 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3913 */
3914IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3915{
3916 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3917
3918 /* Null data selector. */
3919 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3920 {
3921 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3922 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3923 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3924 return VINF_SUCCESS;
3925 }
3926
3927 /* Fetch the descriptor. */
3928 IEMSELDESC Desc;
3929 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3930 if (rcStrict != VINF_SUCCESS)
3931 {
3932 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3933 VBOXSTRICTRC_VAL(rcStrict)));
3934 return rcStrict;
3935 }
3936
3937 /* Must be a data segment or readable code segment. */
3938 if ( !Desc.Legacy.Gen.u1DescType
3939 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3940 {
3941 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3942 Desc.Legacy.Gen.u4Type));
3943 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3944 }
3945
3946 /* Check privileges for data segments and non-conforming code segments. */
3947 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3948 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3949 {
3950 /* The RPL and the new CPL must be less than or equal to the DPL. */
3951 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3952 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3953 {
3954 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3955 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3957 }
3958 }
3959
3960 /* Is it there? */
3961 if (!Desc.Legacy.Gen.u1Present)
3962 {
3963 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3964 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3965 }
3966
3967 /* The base and limit. */
3968 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3969 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3970
3971 /*
3972 * Ok, everything checked out fine. Now set the accessed bit before
3973 * committing the result into the registers.
3974 */
3975 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3976 {
3977 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3978 if (rcStrict != VINF_SUCCESS)
3979 return rcStrict;
3980 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3981 }
3982
3983 /* Commit */
3984 pSReg->Sel = uSel;
3985 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3986 pSReg->u32Limit = cbLimit;
3987 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3988 pSReg->ValidSel = uSel;
3989 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3990 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3991 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3992
3993 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3994 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3995 return VINF_SUCCESS;
3996}
3997
3998
3999/**
4000 * Performs a task switch.
4001 *
4002 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4003 * caller is responsible for performing the necessary checks (like DPL, TSS
4004 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4005 * reference for JMP, CALL, IRET.
4006 *
4007 * If the task switch is due to a software interrupt or hardware exception,
4008 * the caller is responsible for validating the TSS selector and descriptor. See
4009 * Intel Instruction reference for INT n.
4010 *
4011 * @returns VBox strict status code.
4012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4013 * @param enmTaskSwitch The cause of the task switch.
4014 * @param uNextEip The EIP effective after the task switch.
4015 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4016 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4017 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4018 * @param SelTSS The TSS selector of the new task.
4019 * @param pNewDescTSS Pointer to the new TSS descriptor.
4020 */
4021IEM_STATIC VBOXSTRICTRC
4022iemTaskSwitch(PVMCPU pVCpu,
4023 IEMTASKSWITCH enmTaskSwitch,
4024 uint32_t uNextEip,
4025 uint32_t fFlags,
4026 uint16_t uErr,
4027 uint64_t uCr2,
4028 RTSEL SelTSS,
4029 PIEMSELDESC pNewDescTSS)
4030{
4031 Assert(!IEM_IS_REAL_MODE(pVCpu));
4032 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4033 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4034
4035 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4036 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4037 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4038 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4039 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4040
4041 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4042 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4043
4044 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4045 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4046
4047 /* Update CR2 in case it's a page-fault. */
4048 /** @todo This should probably be done much earlier in IEM/PGM. See
4049 * @bugref{5653#c49}. */
4050 if (fFlags & IEM_XCPT_FLAGS_CR2)
4051 pVCpu->cpum.GstCtx.cr2 = uCr2;
4052
4053 /*
4054 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4055 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4056 */
4057 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4058 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4059 if (uNewTSSLimit < uNewTSSLimitMin)
4060 {
4061 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4062 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4063 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4064 }
4065
4066 /*
4067 * Task switches in VMX non-root mode always cause task-switch VM-exits.
4068 * The new TSS must have been read and validated (DPL, limits etc.) before a
4069 * task-switch VM-exit commences.
4070 *
4071 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4072 */
4073 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4074 {
4075 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4076 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4077 }
4078
4079 /*
4080 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4081 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4082 */
4083 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4084 {
4085 uint32_t const uExitInfo1 = SelTSS;
4086 uint32_t uExitInfo2 = uErr;
4087 switch (enmTaskSwitch)
4088 {
4089 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4090 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4091 default: break;
4092 }
4093 if (fFlags & IEM_XCPT_FLAGS_ERR)
4094 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4095 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4096 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4097
4098 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4099 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4100 RT_NOREF2(uExitInfo1, uExitInfo2);
4101 }
4102
4103 /*
4104 * Check the current TSS limit. The last data written to the current TSS during the
4105 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4106 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4107 *
4108 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4109 * end up with smaller than "legal" TSS limits.
4110 */
4111 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4112 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4113 if (uCurTSSLimit < uCurTSSLimitMin)
4114 {
4115 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4116 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4117 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4118 }
4119
4120 /*
4121 * Verify that the new TSS can be accessed and map it. Map only the required contents
4122 * and not the entire TSS.
4123 */
4124 void *pvNewTSS;
4125 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4126 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4127 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4128 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4129 * not perform correct translation if this happens. See Intel spec. 7.2.1
4130 * "Task-State Segment" */
4131 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4132 if (rcStrict != VINF_SUCCESS)
4133 {
4134 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4135 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4136 return rcStrict;
4137 }
4138
4139 /*
4140 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4141 */
4142 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4143 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4144 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4145 {
4146 PX86DESC pDescCurTSS;
4147 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4148 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4149 if (rcStrict != VINF_SUCCESS)
4150 {
4151 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4152 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4153 return rcStrict;
4154 }
4155
4156 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4157 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4158 if (rcStrict != VINF_SUCCESS)
4159 {
4160 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4161 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4162 return rcStrict;
4163 }
4164
4165 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4166 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4167 {
4168 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4169 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4170 u32EFlags &= ~X86_EFL_NT;
4171 }
4172 }
4173
4174 /*
4175 * Save the CPU state into the current TSS.
4176 */
4177 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4178 if (GCPtrNewTSS == GCPtrCurTSS)
4179 {
4180 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4181 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4182 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4183 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4184 pVCpu->cpum.GstCtx.ldtr.Sel));
4185 }
4186 if (fIsNewTSS386)
4187 {
4188 /*
4189 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4190 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4191 */
4192 void *pvCurTSS32;
4193 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4194 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4195 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4196 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4197 if (rcStrict != VINF_SUCCESS)
4198 {
4199 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4200 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4201 return rcStrict;
4202 }
4203
4204 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4205 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4206 pCurTSS32->eip = uNextEip;
4207 pCurTSS32->eflags = u32EFlags;
4208 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4209 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4210 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4211 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4212 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4213 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4214 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4215 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4216 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4217 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4218 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4219 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4220 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4221 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4222
4223 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4224 if (rcStrict != VINF_SUCCESS)
4225 {
4226 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4227 VBOXSTRICTRC_VAL(rcStrict)));
4228 return rcStrict;
4229 }
4230 }
4231 else
4232 {
4233 /*
4234 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4235 */
4236 void *pvCurTSS16;
4237 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4238 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4239 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4240 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4241 if (rcStrict != VINF_SUCCESS)
4242 {
4243 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4244 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4245 return rcStrict;
4246 }
4247
4248 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4249 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4250 pCurTSS16->ip = uNextEip;
4251 pCurTSS16->flags = u32EFlags;
4252 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4253 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4254 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4255 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4256 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4257 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4258 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4259 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4260 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4261 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4262 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4263 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4264
4265 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4266 if (rcStrict != VINF_SUCCESS)
4267 {
4268 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4269 VBOXSTRICTRC_VAL(rcStrict)));
4270 return rcStrict;
4271 }
4272 }
4273
4274 /*
4275 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4276 */
4277 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4278 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4279 {
4280 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4281 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4282 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4283 }
4284
4285 /*
4286 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4287 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4288 */
4289 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4290 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4291 bool fNewDebugTrap;
4292 if (fIsNewTSS386)
4293 {
4294 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4295 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4296 uNewEip = pNewTSS32->eip;
4297 uNewEflags = pNewTSS32->eflags;
4298 uNewEax = pNewTSS32->eax;
4299 uNewEcx = pNewTSS32->ecx;
4300 uNewEdx = pNewTSS32->edx;
4301 uNewEbx = pNewTSS32->ebx;
4302 uNewEsp = pNewTSS32->esp;
4303 uNewEbp = pNewTSS32->ebp;
4304 uNewEsi = pNewTSS32->esi;
4305 uNewEdi = pNewTSS32->edi;
4306 uNewES = pNewTSS32->es;
4307 uNewCS = pNewTSS32->cs;
4308 uNewSS = pNewTSS32->ss;
4309 uNewDS = pNewTSS32->ds;
4310 uNewFS = pNewTSS32->fs;
4311 uNewGS = pNewTSS32->gs;
4312 uNewLdt = pNewTSS32->selLdt;
4313 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4314 }
4315 else
4316 {
4317 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4318 uNewCr3 = 0;
4319 uNewEip = pNewTSS16->ip;
4320 uNewEflags = pNewTSS16->flags;
4321 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4322 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4323 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4324 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4325 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4326 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4327 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4328 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4329 uNewES = pNewTSS16->es;
4330 uNewCS = pNewTSS16->cs;
4331 uNewSS = pNewTSS16->ss;
4332 uNewDS = pNewTSS16->ds;
4333 uNewFS = 0;
4334 uNewGS = 0;
4335 uNewLdt = pNewTSS16->selLdt;
4336 fNewDebugTrap = false;
4337 }
4338
4339 if (GCPtrNewTSS == GCPtrCurTSS)
4340 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4341 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4342
4343 /*
4344 * We're done accessing the new TSS.
4345 */
4346 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4347 if (rcStrict != VINF_SUCCESS)
4348 {
4349 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4350 return rcStrict;
4351 }
4352
4353 /*
4354 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4355 */
4356 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4357 {
4358 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4359 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4360 if (rcStrict != VINF_SUCCESS)
4361 {
4362 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4363 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4364 return rcStrict;
4365 }
4366
4367 /* Check that the descriptor indicates the new TSS is available (not busy). */
4368 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4369 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4370 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4371
4372 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4373 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4374 if (rcStrict != VINF_SUCCESS)
4375 {
4376 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4377 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4378 return rcStrict;
4379 }
4380 }
4381
4382 /*
4383 * From this point on, we're technically in the new task. We will defer exceptions
4384 * until the completion of the task switch but before executing any instructions in the new task.
4385 */
4386 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4387 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4388 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4389 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4390 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4391 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4392 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4393
4394 /* Set the busy bit in TR. */
4395 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4396 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4397 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4398 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4399 {
4400 uNewEflags |= X86_EFL_NT;
4401 }
4402
4403 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4404 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4405 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4406
4407 pVCpu->cpum.GstCtx.eip = uNewEip;
4408 pVCpu->cpum.GstCtx.eax = uNewEax;
4409 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4410 pVCpu->cpum.GstCtx.edx = uNewEdx;
4411 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4412 pVCpu->cpum.GstCtx.esp = uNewEsp;
4413 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4414 pVCpu->cpum.GstCtx.esi = uNewEsi;
4415 pVCpu->cpum.GstCtx.edi = uNewEdi;
4416
4417 uNewEflags &= X86_EFL_LIVE_MASK;
4418 uNewEflags |= X86_EFL_RA1_MASK;
4419 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4420
4421 /*
4422 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4423 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4424 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4425 */
4426 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4427 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4430 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4433 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4436 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4437
4438 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4439 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4440
4441 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4442 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4443 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4444
4445 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4446 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4447 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4448 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4449
4450 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4451 {
4452 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4454 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4455 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4456 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4457 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4458 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4459 }
4460
4461 /*
4462 * Switch CR3 for the new task.
4463 */
4464 if ( fIsNewTSS386
4465 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4466 {
4467 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4468 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4469 AssertRCSuccessReturn(rc, rc);
4470
4471 /* Inform PGM. */
4472 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4473 AssertRCReturn(rc, rc);
4474 /* ignore informational status codes */
4475
4476 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4477 }
4478
4479 /*
4480 * Switch LDTR for the new task.
4481 */
4482 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4483 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4484 else
4485 {
4486 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4487
4488 IEMSELDESC DescNewLdt;
4489 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4490 if (rcStrict != VINF_SUCCESS)
4491 {
4492 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4493 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4494 return rcStrict;
4495 }
4496 if ( !DescNewLdt.Legacy.Gen.u1Present
4497 || DescNewLdt.Legacy.Gen.u1DescType
4498 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4499 {
4500 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4501 uNewLdt, DescNewLdt.Legacy.u));
4502 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4503 }
4504
4505 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4506 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4507 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4508 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4509 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4510 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4511 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4512 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4513 }
4514
4515 IEMSELDESC DescSS;
4516 if (IEM_IS_V86_MODE(pVCpu))
4517 {
4518 pVCpu->iem.s.uCpl = 3;
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4520 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4521 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4522 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4523 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4524 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4525
4526 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4527 DescSS.Legacy.u = 0;
4528 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4529 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4530 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4531 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4532 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4533 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4534 DescSS.Legacy.Gen.u2Dpl = 3;
4535 }
4536 else
4537 {
4538 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4539
4540 /*
4541 * Load the stack segment for the new task.
4542 */
4543 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4544 {
4545 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4546 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4547 }
4548
4549 /* Fetch the descriptor. */
4550 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4551 if (rcStrict != VINF_SUCCESS)
4552 {
4553 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4554 VBOXSTRICTRC_VAL(rcStrict)));
4555 return rcStrict;
4556 }
4557
4558 /* SS must be a data segment and writable. */
4559 if ( !DescSS.Legacy.Gen.u1DescType
4560 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4561 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4562 {
4563 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4564 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4565 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4566 }
4567
4568 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4569 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4570 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4571 {
4572 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4573 uNewCpl));
4574 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4575 }
4576
4577 /* Is it there? */
4578 if (!DescSS.Legacy.Gen.u1Present)
4579 {
4580 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4581 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4582 }
4583
4584 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4585 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4586
4587 /* Set the accessed bit before committing the result into SS. */
4588 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4589 {
4590 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4591 if (rcStrict != VINF_SUCCESS)
4592 return rcStrict;
4593 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4594 }
4595
4596 /* Commit SS. */
4597 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4598 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4599 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4600 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4601 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4602 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4603 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4604
4605 /* CPL has changed, update IEM before loading rest of segments. */
4606 pVCpu->iem.s.uCpl = uNewCpl;
4607
4608 /*
4609 * Load the data segments for the new task.
4610 */
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4618 if (rcStrict != VINF_SUCCESS)
4619 return rcStrict;
4620 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4621 if (rcStrict != VINF_SUCCESS)
4622 return rcStrict;
4623
4624 /*
4625 * Load the code segment for the new task.
4626 */
4627 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4628 {
4629 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4630 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4631 }
4632
4633 /* Fetch the descriptor. */
4634 IEMSELDESC DescCS;
4635 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4636 if (rcStrict != VINF_SUCCESS)
4637 {
4638 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4639 return rcStrict;
4640 }
4641
4642 /* CS must be a code segment. */
4643 if ( !DescCS.Legacy.Gen.u1DescType
4644 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4645 {
4646 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4647 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4648 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4649 }
4650
4651 /* For conforming CS, DPL must be less than or equal to the RPL. */
4652 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4653 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4654 {
4655 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4656 DescCS.Legacy.Gen.u2Dpl));
4657 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4658 }
4659
4660 /* For non-conforming CS, DPL must match RPL. */
4661 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4662 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4663 {
4664 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4665 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4666 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4667 }
4668
4669 /* Is it there? */
4670 if (!DescCS.Legacy.Gen.u1Present)
4671 {
4672 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4673 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4674 }
4675
4676 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4677 u64Base = X86DESC_BASE(&DescCS.Legacy);
4678
4679 /* Set the accessed bit before committing the result into CS. */
4680 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4681 {
4682 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4683 if (rcStrict != VINF_SUCCESS)
4684 return rcStrict;
4685 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4686 }
4687
4688 /* Commit CS. */
4689 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4690 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4691 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4692 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4693 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4695 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4696 }
4697
4698 /** @todo Debug trap. */
4699 if (fIsNewTSS386 && fNewDebugTrap)
4700 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4701
4702 /*
4703 * Construct the error code masks based on what caused this task switch.
4704 * See Intel Instruction reference for INT.
4705 */
4706 uint16_t uExt;
4707 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4708 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4709 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4710 {
4711 uExt = 1;
4712 }
4713 else
4714 uExt = 0;
4715
4716 /*
4717 * Push any error code on to the new stack.
4718 */
4719 if (fFlags & IEM_XCPT_FLAGS_ERR)
4720 {
4721 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4722 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4723 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4724
4725 /* Check that there is sufficient space on the stack. */
4726 /** @todo Factor out segment limit checking for normal/expand down segments
4727 * into a separate function. */
4728 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4729 {
4730 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4731 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4732 {
4733 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4734 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4735 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4736 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4737 }
4738 }
4739 else
4740 {
4741 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4742 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4743 {
4744 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4745 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4746 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4747 }
4748 }
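/* Added illustrative note (not part of the original source): for a normal SS the frame must
   fit below the limit, while for an expand-down SS valid offsets lie strictly above the
   limit. E.g. a 16-bit expand-down SS with cbLimitSS=0x1000 and ESP=0x8000 takes a 4-byte
   push fine (0x8000 - 4 is still above 0x1000), whereas ESP=0x1002 fails the second check
   above. */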
4749
4750
4751 if (fIsNewTSS386)
4752 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4753 else
4754 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4755 if (rcStrict != VINF_SUCCESS)
4756 {
4757 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4758 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4759 return rcStrict;
4760 }
4761 }
4762
4763 /* Check the new EIP against the new CS limit. */
4764 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4765 {
4766 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4767 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4768 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4769 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4770 }
4771
4772 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4773 pVCpu->cpum.GstCtx.ss.Sel));
4774 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4775}
4776
4777
4778/**
4779 * Implements exceptions and interrupts for protected mode.
4780 *
4781 * @returns VBox strict status code.
4782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4783 * @param cbInstr The number of bytes to offset rIP by in the return
4784 * address.
4785 * @param u8Vector The interrupt / exception vector number.
4786 * @param fFlags The flags.
4787 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4788 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4789 */
4790IEM_STATIC VBOXSTRICTRC
4791iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4792 uint8_t cbInstr,
4793 uint8_t u8Vector,
4794 uint32_t fFlags,
4795 uint16_t uErr,
4796 uint64_t uCr2)
4797{
4798 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4799
4800 /*
4801 * Read the IDT entry.
4802 */
4803 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4804 {
4805 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4806 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4807 }
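/* Added illustrative note (not part of the original source): protected-mode IDT entries are
   8 bytes each, so the check above requires IDTR.limit to cover bytes 8*vector .. 8*vector+7;
   e.g. vector 0x80 needs cbIdt >= 0x407, otherwise we raise #GP with the IDT bit set in the
   error code. */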
4808 X86DESC Idte;
4809 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4810 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4811 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4812 {
4813 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4814 return rcStrict;
4815 }
4816 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4817 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4818 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4819
4820 /*
4821 * Check the descriptor type, DPL and such.
4822 * ASSUMES this is done in the same order as described for call-gate calls.
4823 */
4824 if (Idte.Gate.u1DescType)
4825 {
4826 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4827 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4828 }
4829 bool fTaskGate = false;
4830 uint8_t f32BitGate = true;
4831 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4832 switch (Idte.Gate.u4Type)
4833 {
4834 case X86_SEL_TYPE_SYS_UNDEFINED:
4835 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4836 case X86_SEL_TYPE_SYS_LDT:
4837 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4838 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4839 case X86_SEL_TYPE_SYS_UNDEFINED2:
4840 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4841 case X86_SEL_TYPE_SYS_UNDEFINED3:
4842 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4843 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4844 case X86_SEL_TYPE_SYS_UNDEFINED4:
4845 {
4846 /** @todo check what actually happens when the type is wrong...
4847 * esp. call gates. */
4848 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4849 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4850 }
4851
4852 case X86_SEL_TYPE_SYS_286_INT_GATE:
4853 f32BitGate = false;
4854 RT_FALL_THRU();
4855 case X86_SEL_TYPE_SYS_386_INT_GATE:
4856 fEflToClear |= X86_EFL_IF;
4857 break;
4858
4859 case X86_SEL_TYPE_SYS_TASK_GATE:
4860 fTaskGate = true;
4861#ifndef IEM_IMPLEMENTS_TASKSWITCH
4862 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4863#endif
4864 break;
4865
4866 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4867 f32BitGate = false; RT_FALL_THRU();
4868 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4869 break;
4870
4871 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4872 }
4873
4874 /* Check DPL against CPL if applicable. */
4875 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4876 {
4877 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4878 {
4879 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4880 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4881 }
4882 }
4883
4884 /* Is it there? */
4885 if (!Idte.Gate.u1Present)
4886 {
4887 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4888 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4889 }
4890
4891 /* Is it a task-gate? */
4892 if (fTaskGate)
4893 {
4894 /*
4895 * Construct the error code masks based on what caused this task switch.
4896 * See Intel Instruction reference for INT.
4897 */
4898 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4899 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4900 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4901 RTSEL SelTSS = Idte.Gate.u16Sel;
4902
4903 /*
4904 * Fetch the TSS descriptor in the GDT.
4905 */
4906 IEMSELDESC DescTSS;
4907 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4908 if (rcStrict != VINF_SUCCESS)
4909 {
4910 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4911 VBOXSTRICTRC_VAL(rcStrict)));
4912 return rcStrict;
4913 }
4914
4915 /* The TSS descriptor must be a system segment and be available (not busy). */
4916 if ( DescTSS.Legacy.Gen.u1DescType
4917 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4918 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4919 {
4920 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4921 u8Vector, SelTSS, DescTSS.Legacy.au64));
4922 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4923 }
4924
4925 /* The TSS must be present. */
4926 if (!DescTSS.Legacy.Gen.u1Present)
4927 {
4928 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4929 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4930 }
4931
4932 /* Do the actual task switch. */
4933 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4934 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4935 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4936 }
4937
4938 /* A null CS is bad. */
4939 RTSEL NewCS = Idte.Gate.u16Sel;
4940 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4941 {
4942 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4943 return iemRaiseGeneralProtectionFault0(pVCpu);
4944 }
4945
4946 /* Fetch the descriptor for the new CS. */
4947 IEMSELDESC DescCS;
4948 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4949 if (rcStrict != VINF_SUCCESS)
4950 {
4951 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4952 return rcStrict;
4953 }
4954
4955 /* Must be a code segment. */
4956 if (!DescCS.Legacy.Gen.u1DescType)
4957 {
4958 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4959 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4960 }
4961 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4962 {
4963 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4964 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4965 }
4966
4967 /* Don't allow lowering the privilege level. */
4968 /** @todo Does the lowering of privileges apply to software interrupts
4969 * only? This has bearings on the more-privileged or
4970 * same-privilege stack behavior further down. A testcase would
4971 * be nice. */
4972 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4973 {
4974 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4975 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4976 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4977 }
4978
4979 /* Make sure the selector is present. */
4980 if (!DescCS.Legacy.Gen.u1Present)
4981 {
4982 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4983 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4984 }
4985
4986 /* Check the new EIP against the new CS limit. */
4987 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4988 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4989 ? Idte.Gate.u16OffsetLow
4990 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
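/* Added illustrative note (not part of the original source): a 286 gate carries only a 16-bit
   offset, while a 386 gate splits a 32-bit offset over two fields; e.g. u16OffsetHigh=0x0040
   and u16OffsetLow=0x1234 give uNewEip=0x00401234. */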
4991 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4992 if (uNewEip > cbLimitCS)
4993 {
4994 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4995 u8Vector, uNewEip, cbLimitCS, NewCS));
4996 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4997 }
4998 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4999
5000 /* Calc the flag image to push. */
5001 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5002 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5003 fEfl &= ~X86_EFL_RF;
5004 else
5005 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5006
5007 /* When coming from V8086 mode we can only go to CPL 0. */
5008 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5009 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5010 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5011 {
5012 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5013 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5014 }
5015
5016 /*
5017 * If the privilege level changes, we need to get a new stack from the TSS.
5018 * This in turns means validating the new SS and ESP...
5019 */
5020 if (uNewCpl != pVCpu->iem.s.uCpl)
5021 {
5022 RTSEL NewSS;
5023 uint32_t uNewEsp;
5024 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5025 if (rcStrict != VINF_SUCCESS)
5026 return rcStrict;
5027
5028 IEMSELDESC DescSS;
5029 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5030 if (rcStrict != VINF_SUCCESS)
5031 return rcStrict;
5032 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5033 if (!DescSS.Legacy.Gen.u1DefBig)
5034 {
5035 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5036 uNewEsp = (uint16_t)uNewEsp;
5037 }
5038
5039 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5040
5041 /* Check that there is sufficient space for the stack frame. */
5042 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5043 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5044 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5045 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
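/* Added illustrative note (not part of the original source): this evaluates to
     - 32-bit gate: 20 bytes (EIP, CS, EFLAGS, ESP, SS), or 24 with an error code;
     - 16-bit gate: 10 bytes, or 12 with an error code;
     - coming from V8086 mode the four extra selector slots (ES, DS, FS, GS) make it
       36/40 bytes for a 32-bit gate and 18/20 bytes for a 16-bit gate. */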
5046
5047 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5048 {
5049 if ( uNewEsp - 1 > cbLimitSS
5050 || uNewEsp < cbStackFrame)
5051 {
5052 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5053 u8Vector, NewSS, uNewEsp, cbStackFrame));
5054 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5055 }
5056 }
5057 else
5058 {
5059 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5060 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5061 {
5062 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5063 u8Vector, NewSS, uNewEsp, cbStackFrame));
5064 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5065 }
5066 }
5067
5068 /*
5069 * Start making changes.
5070 */
5071
5072 /* Set the new CPL so that stack accesses use it. */
5073 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5074 pVCpu->iem.s.uCpl = uNewCpl;
5075
5076 /* Create the stack frame. */
5077 RTPTRUNION uStackFrame;
5078 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5079 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5080 if (rcStrict != VINF_SUCCESS)
5081 return rcStrict;
5082 void * const pvStackFrame = uStackFrame.pv;
5083 if (f32BitGate)
5084 {
5085 if (fFlags & IEM_XCPT_FLAGS_ERR)
5086 *uStackFrame.pu32++ = uErr;
5087 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5088 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5089 uStackFrame.pu32[2] = fEfl;
5090 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5091 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5092 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5093 if (fEfl & X86_EFL_VM)
5094 {
5095 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5096 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5097 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5098 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5099 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5100 }
5101 }
5102 else
5103 {
5104 if (fFlags & IEM_XCPT_FLAGS_ERR)
5105 *uStackFrame.pu16++ = uErr;
5106 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5107 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5108 uStackFrame.pu16[2] = fEfl;
5109 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5110 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5111 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5112 if (fEfl & X86_EFL_VM)
5113 {
5114 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5115 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5116 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5117 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5118 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5119 }
5120 }
5121 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5122 if (rcStrict != VINF_SUCCESS)
5123 return rcStrict;
5124
5125 /* Mark the selectors 'accessed' (hope this is the correct time). */
5126 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5127 * after pushing the stack frame? (Write protect the gdt + stack to
5128 * find out.) */
5129 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5130 {
5131 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5132 if (rcStrict != VINF_SUCCESS)
5133 return rcStrict;
5134 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5135 }
5136
5137 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5138 {
5139 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5140 if (rcStrict != VINF_SUCCESS)
5141 return rcStrict;
5142 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5143 }
5144
5145 /*
5146 * Start committing the register changes (joins with the DPL=CPL branch).
5147 */
5148 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5149 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5150 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5151 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5152 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5153 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5154 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5155 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5156 * SP is loaded).
5157 * Need to check the other combinations too:
5158 * - 16-bit TSS, 32-bit handler
5159 * - 32-bit TSS, 16-bit handler */
5160 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5161 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5162 else
5163 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5164
5165 if (fEfl & X86_EFL_VM)
5166 {
5167 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5168 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5169 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5170 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5171 }
5172 }
5173 /*
5174 * Same privilege, no stack change and smaller stack frame.
5175 */
5176 else
5177 {
5178 uint64_t uNewRsp;
5179 RTPTRUNION uStackFrame;
5180 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
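/* Added illustrative note (not part of the original source): without a stack switch only EIP,
   CS and EFLAGS (plus an optional error code) are pushed, i.e. 12 or 16 bytes through a
   32-bit gate and 6 or 8 bytes through a 16-bit gate. */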
5181 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5182 if (rcStrict != VINF_SUCCESS)
5183 return rcStrict;
5184 void * const pvStackFrame = uStackFrame.pv;
5185
5186 if (f32BitGate)
5187 {
5188 if (fFlags & IEM_XCPT_FLAGS_ERR)
5189 *uStackFrame.pu32++ = uErr;
5190 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5191 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5192 uStackFrame.pu32[2] = fEfl;
5193 }
5194 else
5195 {
5196 if (fFlags & IEM_XCPT_FLAGS_ERR)
5197 *uStackFrame.pu16++ = uErr;
5198 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5199 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5200 uStackFrame.pu16[2] = fEfl;
5201 }
5202 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the stack-push commit helper here; RSP is committed below with the rest */
5203 if (rcStrict != VINF_SUCCESS)
5204 return rcStrict;
5205
5206 /* Mark the CS selector as 'accessed'. */
5207 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5208 {
5209 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5210 if (rcStrict != VINF_SUCCESS)
5211 return rcStrict;
5212 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5213 }
5214
5215 /*
5216 * Start committing the register changes (joins with the other branch).
5217 */
5218 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5219 }
5220
5221 /* ... register committing continues. */
5222 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5223 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5224 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5225 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5226 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5227 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5228
5229 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5230 fEfl &= ~fEflToClear;
5231 IEMMISC_SET_EFL(pVCpu, fEfl);
5232
5233 if (fFlags & IEM_XCPT_FLAGS_CR2)
5234 pVCpu->cpum.GstCtx.cr2 = uCr2;
5235
5236 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5237 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5238
5239 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5240}
5241
5242
5243/**
5244 * Implements exceptions and interrupts for long mode.
5245 *
5246 * @returns VBox strict status code.
5247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5248 * @param cbInstr The number of bytes to offset rIP by in the return
5249 * address.
5250 * @param u8Vector The interrupt / exception vector number.
5251 * @param fFlags The flags.
5252 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5253 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5254 */
5255IEM_STATIC VBOXSTRICTRC
5256iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5257 uint8_t cbInstr,
5258 uint8_t u8Vector,
5259 uint32_t fFlags,
5260 uint16_t uErr,
5261 uint64_t uCr2)
5262{
5263 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5264
5265 /*
5266 * Read the IDT entry.
5267 */
5268 uint16_t offIdt = (uint16_t)u8Vector << 4;
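/* Added illustrative note (not part of the original source): long-mode IDT gates are 16 bytes
   each, so e.g. vector 0x0e (#PF) lives at IDTR.base + 0xe0 and is read below as two
   8-byte fetches. */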
5269 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5270 {
5271 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5272 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5273 }
5274 X86DESC64 Idte;
5275 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5276 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5277 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5278 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5279 {
5280 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5281 return rcStrict;
5282 }
5283 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5284 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5285 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5286
5287 /*
5288 * Check the descriptor type, DPL and such.
5289 * ASSUMES this is done in the same order as described for call-gate calls.
5290 */
5291 if (Idte.Gate.u1DescType)
5292 {
5293 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5294 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5295 }
5296 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5297 switch (Idte.Gate.u4Type)
5298 {
5299 case AMD64_SEL_TYPE_SYS_INT_GATE:
5300 fEflToClear |= X86_EFL_IF;
5301 break;
5302 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5303 break;
5304
5305 default:
5306 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5307 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5308 }
5309
5310 /* Check DPL against CPL if applicable. */
5311 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5312 {
5313 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5314 {
5315 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5316 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5317 }
5318 }
5319
5320 /* Is it there? */
5321 if (!Idte.Gate.u1Present)
5322 {
5323 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5324 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5325 }
5326
5327 /* A null CS is bad. */
5328 RTSEL NewCS = Idte.Gate.u16Sel;
5329 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5330 {
5331 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5332 return iemRaiseGeneralProtectionFault0(pVCpu);
5333 }
5334
5335 /* Fetch the descriptor for the new CS. */
5336 IEMSELDESC DescCS;
5337 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5338 if (rcStrict != VINF_SUCCESS)
5339 {
5340 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5341 return rcStrict;
5342 }
5343
5344 /* Must be a 64-bit code segment. */
5345 if (!DescCS.Long.Gen.u1DescType)
5346 {
5347 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5348 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5349 }
5350 if ( !DescCS.Long.Gen.u1Long
5351 || DescCS.Long.Gen.u1DefBig
5352 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5353 {
5354 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5355 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5356 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5357 }
5358
5359 /* Don't allow lowering the privilege level. For non-conforming CS
5360 selectors, the CS.DPL sets the privilege level the trap/interrupt
5361 handler runs at. For conforming CS selectors, the CPL remains
5362 unchanged, but the CS.DPL must be <= CPL. */
5363 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5364 * when CPU in Ring-0. Result \#GP? */
5365 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5366 {
5367 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5368 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5369 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5370 }
5371
5372
5373 /* Make sure the selector is present. */
5374 if (!DescCS.Legacy.Gen.u1Present)
5375 {
5376 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5377 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5378 }
5379
5380 /* Check that the new RIP is canonical. */
5381 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5382 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5383 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5384 if (!IEM_IS_CANONICAL(uNewRip))
5385 {
5386 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5387 return iemRaiseGeneralProtectionFault0(pVCpu);
5388 }
5389
5390 /*
5391 * If the privilege level changes or if the IST isn't zero, we need to get
5392 * a new stack from the TSS.
5393 */
5394 uint64_t uNewRsp;
5395 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5396 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5397 if ( uNewCpl != pVCpu->iem.s.uCpl
5398 || Idte.Gate.u3IST != 0)
5399 {
5400 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5401 if (rcStrict != VINF_SUCCESS)
5402 return rcStrict;
5403 }
5404 else
5405 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5406 uNewRsp &= ~(uint64_t)0xf;
5407
5408 /*
5409 * Calc the flag image to push.
5410 */
5411 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5412 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5413 fEfl &= ~X86_EFL_RF;
5414 else
5415 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5416
5417 /*
5418 * Start making changes.
5419 */
5420 /* Set the new CPL so that stack accesses use it. */
5421 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5422 pVCpu->iem.s.uCpl = uNewCpl;
5423
5424 /* Create the stack frame. */
5425 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
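/* Added illustrative note (not part of the original source): the long-mode frame is always
   SS, RSP, RFLAGS, CS, RIP plus an optional error code, i.e. 40 or 48 bytes pushed onto the
   16-byte aligned RSP computed above. */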
5426 RTPTRUNION uStackFrame;
5427 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5428 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5429 if (rcStrict != VINF_SUCCESS)
5430 return rcStrict;
5431 void * const pvStackFrame = uStackFrame.pv;
5432
5433 if (fFlags & IEM_XCPT_FLAGS_ERR)
5434 *uStackFrame.pu64++ = uErr;
5435 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5436 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5437 uStackFrame.pu64[2] = fEfl;
5438 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5439 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5440 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5441 if (rcStrict != VINF_SUCCESS)
5442 return rcStrict;
5443
5444 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5445 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5446 * after pushing the stack frame? (Write protect the gdt + stack to
5447 * find out.) */
5448 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5449 {
5450 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5451 if (rcStrict != VINF_SUCCESS)
5452 return rcStrict;
5453 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5454 }
5455
5456 /*
5457 * Start committing the register changes.
5458 */
5459 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5460 * hidden registers when interrupting 32-bit or 16-bit code! */
5461 if (uNewCpl != uOldCpl)
5462 {
5463 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5464 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5465 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5466 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5467 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5468 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5469 }
5470 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5471 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5472 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5473 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5474 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5475 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5476 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5477 pVCpu->cpum.GstCtx.rip = uNewRip;
5478
5479 fEfl &= ~fEflToClear;
5480 IEMMISC_SET_EFL(pVCpu, fEfl);
5481
5482 if (fFlags & IEM_XCPT_FLAGS_CR2)
5483 pVCpu->cpum.GstCtx.cr2 = uCr2;
5484
5485 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5486 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5487
5488 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5489}
5490
5491
5492/**
5493 * Implements exceptions and interrupts.
5494 *
5495 * All exceptions and interrupts go through this function!
5496 *
5497 * @returns VBox strict status code.
5498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5499 * @param cbInstr The number of bytes to offset rIP by in the return
5500 * address.
5501 * @param u8Vector The interrupt / exception vector number.
5502 * @param fFlags The flags.
5503 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5504 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5505 */
5506DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5507iemRaiseXcptOrInt(PVMCPU pVCpu,
5508 uint8_t cbInstr,
5509 uint8_t u8Vector,
5510 uint32_t fFlags,
5511 uint16_t uErr,
5512 uint64_t uCr2)
5513{
5514 /*
5515 * Get all the state that we might need here.
5516 */
5517 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5518 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5519
5520#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5521 /*
5522 * Flush prefetch buffer
5523 */
5524 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5525#endif
5526
5527 /*
5528 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5529 */
5530 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5531 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5532 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5533 | IEM_XCPT_FLAGS_BP_INSTR
5534 | IEM_XCPT_FLAGS_ICEBP_INSTR
5535 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5536 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5537 {
5538 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5539 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5540 u8Vector = X86_XCPT_GP;
5541 uErr = 0;
5542 }
5543#ifdef DBGFTRACE_ENABLED
5544 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5545 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5546 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5547#endif
5548
5549 /*
5550 * Evaluate whether NMI blocking should be in effect.
5551 * Normally, NMI blocking is in effect whenever we inject an NMI.
5552 */
5553 bool fBlockNmi;
5554 if ( u8Vector == X86_XCPT_NMI
5555 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5556 fBlockNmi = true;
5557 else
5558 fBlockNmi = false;
5559
5560#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5561 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5562 {
5563 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5564 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5565 return rcStrict0;
5566
5567 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5568 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5569 {
5570 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5571 fBlockNmi = false;
5572 }
5573 }
5574#endif
5575
5576#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5577 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5578 {
5579 /*
5580 * If the event is being injected as part of VMRUN, it isn't subject to event
5581 * intercepts in the nested-guest. However, secondary exceptions that occur
5582 * during injection of any event -are- subject to exception intercepts.
5583 *
5584 * See AMD spec. 15.20 "Event Injection".
5585 */
5586 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5587 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5588 else
5589 {
5590 /*
5591 * Check and handle if the event being raised is intercepted.
5592 */
5593 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5594 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5595 return rcStrict0;
5596 }
5597 }
5598#endif
5599
5600 /*
5601 * Set NMI blocking if necessary.
5602 */
5603 if ( fBlockNmi
5604 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5605 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5606
5607 /*
5608 * Do recursion accounting.
5609 */
5610 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5611 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5612 if (pVCpu->iem.s.cXcptRecursions == 0)
5613 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5614 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5615 else
5616 {
5617 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5618 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5619 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5620
5621 if (pVCpu->iem.s.cXcptRecursions >= 4)
5622 {
5623#ifdef DEBUG_bird
5624 AssertFailed();
5625#endif
5626 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5627 }
5628
5629 /*
5630 * Evaluate the sequence of recurring events.
5631 */
5632 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5633 NULL /* pXcptRaiseInfo */);
5634 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5635 { /* likely */ }
5636 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5637 {
5638 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5639 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5640 u8Vector = X86_XCPT_DF;
5641 uErr = 0;
5642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5643 /* VMX nested-guest #DF intercept needs to be checked here. */
5644 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5645 {
5646 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5647 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5648 return rcStrict0;
5649 }
5650#endif
5651 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5652 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5653 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5654 }
5655 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5656 {
5657 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5658 return iemInitiateCpuShutdown(pVCpu);
5659 }
5660 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5661 {
5662 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5663 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5664 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5665 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5666 return VERR_EM_GUEST_CPU_HANG;
5667 }
5668 else
5669 {
5670 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5671 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5672 return VERR_IEM_IPE_9;
5673 }
5674
5675 /*
5676 * The 'EXT' bit is set when an exception occurs during delivery of an external
5677 * event (such as an interrupt or an earlier exception)[1]. A privileged software
5678 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5679 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5680 *
5681 * [1] - Intel spec. 6.13 "Error Code"
5682 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5683 * [3] - Intel Instruction reference for INT n.
5684 */
5685 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5686 && (fFlags & IEM_XCPT_FLAGS_ERR)
5687 && u8Vector != X86_XCPT_PF
5688 && u8Vector != X86_XCPT_DF)
5689 {
5690 uErr |= X86_TRAP_ERR_EXTERNAL;
5691 }
5692 }
5693
5694 pVCpu->iem.s.cXcptRecursions++;
5695 pVCpu->iem.s.uCurXcpt = u8Vector;
5696 pVCpu->iem.s.fCurXcpt = fFlags;
5697 pVCpu->iem.s.uCurXcptErr = uErr;
5698 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5699
5700 /*
5701 * Extensive logging.
5702 */
5703#if defined(LOG_ENABLED) && defined(IN_RING3)
5704 if (LogIs3Enabled())
5705 {
5706 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5707 PVM pVM = pVCpu->CTX_SUFF(pVM);
5708 char szRegs[4096];
5709 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5710 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5711 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5712 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5713 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5714 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5715 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5716 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5717 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5718 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5719 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5720 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5721 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5722 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5723 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5724 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5725 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5726 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5727 " efer=%016VR{efer}\n"
5728 " pat=%016VR{pat}\n"
5729 " sf_mask=%016VR{sf_mask}\n"
5730 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5731 " lstar=%016VR{lstar}\n"
5732 " star=%016VR{star} cstar=%016VR{cstar}\n"
5733 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5734 );
5735
5736 char szInstr[256];
5737 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5738 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5739 szInstr, sizeof(szInstr), NULL);
5740 Log3(("%s%s\n", szRegs, szInstr));
5741 }
5742#endif /* LOG_ENABLED */
5743
5744 /*
5745 * Call the mode specific worker function.
5746 */
5747 VBOXSTRICTRC rcStrict;
5748 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5749 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5750 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5751 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5752 else
5753 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5754
5755 /* Flush the prefetch buffer. */
5756#ifdef IEM_WITH_CODE_TLB
5757 pVCpu->iem.s.pbInstrBuf = NULL;
5758#else
5759 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5760#endif
5761
5762 /*
5763 * Unwind.
5764 */
5765 pVCpu->iem.s.cXcptRecursions--;
5766 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5767 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5768 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5769 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5770 pVCpu->iem.s.cXcptRecursions + 1));
5771 return rcStrict;
5772}
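/* Added usage sketch (illustrative, not part of the original source): emulation code
   normally reaches this function through one of the convenience wrappers below, e.g.: */
#if 0 /* example only, not compiled */
    /* Raising #GP(0) from an instruction implementation: */
    return iemRaiseGeneralProtectionFault0(pVCpu); /* -> iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
                                                         IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0) */
#endif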
5773
5774#ifdef IEM_WITH_SETJMP
5775/**
5776 * See iemRaiseXcptOrInt. Will not return.
5777 */
5778IEM_STATIC DECL_NO_RETURN(void)
5779iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5780 uint8_t cbInstr,
5781 uint8_t u8Vector,
5782 uint32_t fFlags,
5783 uint16_t uErr,
5784 uint64_t uCr2)
5785{
5786 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5787 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5788}
5789#endif
5790
5791
5792/** \#DE - 00. */
5793DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5794{
5795 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5796}
5797
5798
5799/** \#DB - 01.
5800 * @note This automatically clears DR7.GD. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5802{
5803 /** @todo set/clear RF. */
5804 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5806}
5807
5808
5809/** \#BR - 05. */
5810DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5811{
5812 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5813}
5814
5815
5816/** \#UD - 06. */
5817DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5818{
5819 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5820}
5821
5822
5823/** \#NM - 07. */
5824DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5825{
5826 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5827}
5828
5829
5830/** \#TS(err) - 0a. */
5831DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5832{
5833 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5834}
5835
5836
5837/** \#TS(tr) - 0a. */
5838DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5839{
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5841 pVCpu->cpum.GstCtx.tr.Sel, 0);
5842}
5843
5844
5845/** \#TS(0) - 0a. */
5846DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5847{
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5849 0, 0);
5850}
5851
5852
5853/** \#TS(sel) - 0a. */
5854DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5855{
5856 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5857 uSel & X86_SEL_MASK_OFF_RPL, 0);
5858}
5859
5860
5861/** \#NP(err) - 0b. */
5862DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5863{
5864 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5865}
5866
5867
5868/** \#NP(sel) - 0b. */
5869DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5870{
5871 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5872 uSel & ~X86_SEL_RPL, 0);
5873}
5874
5875
5876/** \#SS(seg) - 0c. */
5877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5878{
5879 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5880 uSel & ~X86_SEL_RPL, 0);
5881}
5882
5883
5884/** \#SS(err) - 0c. */
5885DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5886{
5887 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5888}
5889
5890
5891/** \#GP(n) - 0d. */
5892DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5893{
5894 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5895}
5896
5897
5898/** \#GP(0) - 0d. */
5899DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5900{
5901 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5902}
5903
5904#ifdef IEM_WITH_SETJMP
5905/** \#GP(0) - 0d. */
5906DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5907{
5908 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5909}
5910#endif
5911
5912
5913/** \#GP(sel) - 0d. */
5914DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5915{
5916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5917 Sel & ~X86_SEL_RPL, 0);
5918}
5919
5920
5921/** \#GP(0) - 0d. */
5922DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5923{
5924 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5925}
5926
5927
5928/** \#GP(sel) - 0d. */
5929DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5930{
5931 NOREF(iSegReg); NOREF(fAccess);
5932 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5933 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5934}
5935
5936#ifdef IEM_WITH_SETJMP
5937/** \#GP(sel) - 0d, longjmp. */
5938DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5939{
5940 NOREF(iSegReg); NOREF(fAccess);
5941 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5942 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5943}
5944#endif
5945
5946/** \#GP(sel) - 0d. */
5947DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5948{
5949 NOREF(Sel);
5950 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5951}
5952
5953#ifdef IEM_WITH_SETJMP
5954/** \#GP(sel) - 0d, longjmp. */
5955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5956{
5957 NOREF(Sel);
5958 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5959}
5960#endif
5961
5962
5963/** \#GP(sel) - 0d. */
5964DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5965{
5966 NOREF(iSegReg); NOREF(fAccess);
5967 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5968}
5969
5970#ifdef IEM_WITH_SETJMP
5971/** \#GP(sel) - 0d, longjmp. */
5972DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5973 uint32_t fAccess)
5974{
5975 NOREF(iSegReg); NOREF(fAccess);
5976 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5977}
5978#endif
5979
5980
5981/** \#PF(n) - 0e. */
5982DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5983{
5984 uint16_t uErr;
5985 switch (rc)
5986 {
5987 case VERR_PAGE_NOT_PRESENT:
5988 case VERR_PAGE_TABLE_NOT_PRESENT:
5989 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5990 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5991 uErr = 0;
5992 break;
5993
5994 default:
5995 AssertMsgFailed(("%Rrc\n", rc));
5996 RT_FALL_THRU();
5997 case VERR_ACCESS_DENIED:
5998 uErr = X86_TRAP_PF_P;
5999 break;
6000
6001 /** @todo reserved */
6002 }
6003
6004 if (pVCpu->iem.s.uCpl == 3)
6005 uErr |= X86_TRAP_PF_US;
6006
6007 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6008 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6009 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6010 uErr |= X86_TRAP_PF_ID;
6011
6012#if 0 /* This is so much nonsense, really. Why was it done like that? */
6013 /* Note! RW access callers reporting a WRITE protection fault, will clear
6014 the READ flag before calling. So, read-modify-write accesses (RW)
6015 can safely be reported as READ faults. */
6016 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6017 uErr |= X86_TRAP_PF_RW;
6018#else
6019 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6020 {
6021 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6022 uErr |= X86_TRAP_PF_RW;
6023 }
6024#endif
6025
6026 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6027 uErr, GCPtrWhere);
6028}
6029
6030#ifdef IEM_WITH_SETJMP
6031/** \#PF(n) - 0e, longjmp. */
6032IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6033{
6034 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6035}
6036#endif
6037
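/*
 * Illustrative sketch, not compiled in: how the \#PF error code assembled by
 * iemRaisePageFault above comes together for a hypothetical ring-3 write to a
 * present but write-protected page.  The rc/fAccess inputs are made up for the
 * example, and IEM_ACCESS_WHAT_DATA is assumed to be the usual data-access
 * classifier; this is not a real call site.
 */
#if 0
static uint16_t iemExamplePageFaultErrCode(void)
{
    uint32_t const fAccess = IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA; /* hypothetical write access */
    int      const rc      = VERR_ACCESS_DENIED;                           /* page present, protection violation */
    RT_NOREF(fAccess, rc);

    uint16_t uErr = X86_TRAP_PF_P;      /* VERR_ACCESS_DENIED maps to the present bit in the switch above. */
    uErr |= X86_TRAP_PF_US;             /* Assuming uCpl == 3 (user mode). */
    uErr |= X86_TRAP_PF_RW;             /* Write without read gets the read/write bit. */
    return uErr;                        /* P | US | RW = 0x7. */
}
#endif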
6038
6039/** \#MF(0) - 10. */
6040DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6041{
6042 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6043}
6044
6045
6046/** \#AC(0) - 11. */
6047DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6048{
6049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6050}
6051
6052
6053/**
6054 * Macro for calling iemCImplRaiseDivideError().
6055 *
6056 * This enables us to add/remove arguments and force different levels of
6057 * inlining as we wish.
6058 *
6059 * @return Strict VBox status code.
6060 */
6061#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6062IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6063{
6064 NOREF(cbInstr);
6065 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6066}
6067
6068
6069/**
6070 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6071 *
6072 * This enables us to add/remove arguments and force different levels of
6073 * inlining as we wish.
6074 *
6075 * @return Strict VBox status code.
6076 */
6077#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6078IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6079{
6080 NOREF(cbInstr);
6081 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6082}
6083
6084
6085/**
6086 * Macro for calling iemCImplRaiseInvalidOpcode().
6087 *
6088 * This enables us to add/remove arguments and force different levels of
6089 * inlining as we wish.
6090 *
6091 * @return Strict VBox status code.
6092 */
6093#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6094IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6095{
6096 NOREF(cbInstr);
6097 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6098}
6099
6100
6101/** @} */
6102
6103
6104/*
6105 *
6106 * Helper routines.
6107 * Helper routines.
6108 * Helper routines.
6109 *
6110 */
6111
6112/**
6113 * Recalculates the effective operand size.
6114 *
6115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6116 */
6117IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6118{
6119 switch (pVCpu->iem.s.enmCpuMode)
6120 {
6121 case IEMMODE_16BIT:
6122 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6123 break;
6124 case IEMMODE_32BIT:
6125 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6126 break;
6127 case IEMMODE_64BIT:
6128 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6129 {
6130 case 0:
6131 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6132 break;
6133 case IEM_OP_PRF_SIZE_OP:
6134 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6135 break;
6136 case IEM_OP_PRF_SIZE_REX_W:
6137 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6138 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6139 break;
6140 }
6141 break;
6142 default:
6143 AssertFailed();
6144 }
6145}
6146
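/*
 * Illustrative sketch, not compiled in: the 64-bit mode prefix combinations
 * handled by iemRecalEffOpSize above, spelled out.  In real decoding
 * fPrefixes is filled in while fetching the instruction bytes; the direct
 * assignments here are purely for the example.
 */
#if 0
static void iemExampleEffOpSize64(PVMCPU pVCpu)
{
    pVCpu->iem.s.enmCpuMode = IEMMODE_64BIT;

    pVCpu->iem.s.fPrefixes = 0;                                          /* no prefixes                      */
    iemRecalEffOpSize(pVCpu);                                            /* -> enmDefOpSize (usually 32-bit) */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_OP;                         /* 0x66 operand-size prefix         */
    iemRecalEffOpSize(pVCpu);                                            /* -> IEMMODE_16BIT                 */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_REX_W;                      /* REX.W                            */
    iemRecalEffOpSize(pVCpu);                                            /* -> IEMMODE_64BIT                 */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP; /* REX.W takes precedence over 0x66 */
    iemRecalEffOpSize(pVCpu);                                            /* -> IEMMODE_64BIT                 */
}
#endif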
6147
6148/**
6149 * Sets the default operand size to 64-bit and recalculates the effective
6150 * operand size.
6151 *
6152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6153 */
6154IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6155{
6156 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6157 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6158 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6159 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6160 else
6161 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6162}
6163
6164
6165/*
6166 *
6167 * Common opcode decoders.
6168 * Common opcode decoders.
6169 * Common opcode decoders.
6170 *
6171 */
6172//#include <iprt/mem.h>
6173
6174/**
6175 * Used to add extra details about a stub case.
6176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6177 */
6178IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6179{
6180#if defined(LOG_ENABLED) && defined(IN_RING3)
6181 PVM pVM = pVCpu->CTX_SUFF(pVM);
6182 char szRegs[4096];
6183 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6184 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6185 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6186 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6187 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6188 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6189 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6190 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6191 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6192 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6193 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6194 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6195 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6196 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6197 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6198 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6199 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6200 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6201 " efer=%016VR{efer}\n"
6202 " pat=%016VR{pat}\n"
6203 " sf_mask=%016VR{sf_mask}\n"
6204 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6205 " lstar=%016VR{lstar}\n"
6206 " star=%016VR{star} cstar=%016VR{cstar}\n"
6207 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6208 );
6209
6210 char szInstr[256];
6211 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6212 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6213 szInstr, sizeof(szInstr), NULL);
6214
6215 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6216#else
6217 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6218#endif
6219}
6220
6221/**
6222 * Complains about a stub.
6223 *
6224 * Providing two versions of this macro, one for daily use and one for use when
6225 * working on IEM.
6226 */
6227#if 0
6228# define IEMOP_BITCH_ABOUT_STUB() \
6229 do { \
6230 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6231 iemOpStubMsg2(pVCpu); \
6232 RTAssertPanic(); \
6233 } while (0)
6234#else
6235# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6236#endif
6237
6238/** Stubs an opcode. */
6239#define FNIEMOP_STUB(a_Name) \
6240 FNIEMOP_DEF(a_Name) \
6241 { \
6242 RT_NOREF_PV(pVCpu); \
6243 IEMOP_BITCH_ABOUT_STUB(); \
6244 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6245 } \
6246 typedef int ignore_semicolon
6247
6248/** Stubs an opcode. */
6249#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6250 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6251 { \
6252 RT_NOREF_PV(pVCpu); \
6253 RT_NOREF_PV(a_Name0); \
6254 IEMOP_BITCH_ABOUT_STUB(); \
6255 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6256 } \
6257 typedef int ignore_semicolon
6258
6259/** Stubs an opcode which currently should raise \#UD. */
6260#define FNIEMOP_UD_STUB(a_Name) \
6261 FNIEMOP_DEF(a_Name) \
6262 { \
6263 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6264 return IEMOP_RAISE_INVALID_OPCODE(); \
6265 } \
6266 typedef int ignore_semicolon
6267
6268/** Stubs an opcode which currently should raise \#UD. */
6269#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6270 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6271 { \
6272 RT_NOREF_PV(pVCpu); \
6273 RT_NOREF_PV(a_Name0); \
6274 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6275 return IEMOP_RAISE_INVALID_OPCODE(); \
6276 } \
6277 typedef int ignore_semicolon
6278
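/*
 * Illustrative sketch, not compiled in: how the stub macros above are meant to
 * be used in the opcode tables.  The opcode function names are hypothetical,
 * not real table entries.
 */
#if 0
/** Hypothetical opcode that is not implemented yet - log and fail gracefully. */
FNIEMOP_STUB(iemOp_exampleNotImplemented);

/** Hypothetical opcode that decodes to an invalid encoding - raise \#UD. */
FNIEMOP_UD_STUB(iemOp_exampleInvalidEncoding);
#endif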
6279
6280
6281/** @name Register Access.
6282 * @{
6283 */
6284
6285/**
6286 * Gets a reference (pointer) to the specified hidden segment register.
6287 *
6288 * @returns Hidden register reference.
6289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6290 * @param iSegReg The segment register.
6291 */
6292IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6293{
6294 Assert(iSegReg < X86_SREG_COUNT);
6295 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6296 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6297
6298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6299 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6300 { /* likely */ }
6301 else
6302 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6303#else
6304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6305#endif
6306 return pSReg;
6307}
6308
6309
6310/**
6311 * Ensures that the given hidden segment register is up to date.
6312 *
6313 * @returns Hidden register reference.
6314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6315 * @param pSReg The segment register.
6316 */
6317IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6318{
6319#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6320 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6321 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6322#else
6323 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6324 NOREF(pVCpu);
6325#endif
6326 return pSReg;
6327}
6328
6329
6330/**
6331 * Gets a reference (pointer) to the specified segment register (the selector
6332 * value).
6333 *
6334 * @returns Pointer to the selector variable.
6335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6336 * @param iSegReg The segment register.
6337 */
6338DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6339{
6340 Assert(iSegReg < X86_SREG_COUNT);
6341 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6342 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6343}
6344
6345
6346/**
6347 * Fetches the selector value of a segment register.
6348 *
6349 * @returns The selector value.
6350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6351 * @param iSegReg The segment register.
6352 */
6353DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6354{
6355 Assert(iSegReg < X86_SREG_COUNT);
6356 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6357 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6358}
6359
6360
6361/**
6362 * Fetches the base address value of a segment register.
6363 *
6364 * @returns The segment base address value.
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param iSegReg The segment register.
6367 */
6368DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6369{
6370 Assert(iSegReg < X86_SREG_COUNT);
6371 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6372 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6373}
6374
6375
6376/**
6377 * Gets a reference (pointer) to the specified general purpose register.
6378 *
6379 * @returns Register reference.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The general purpose register.
6382 */
6383DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6387}
6388
6389
6390/**
6391 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6392 *
6393 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6394 *
6395 * @returns Register reference.
6396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6397 * @param iReg The register.
6398 */
6399DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6400{
6401 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6402 {
6403 Assert(iReg < 16);
6404 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6405 }
6406 /* high 8-bit register. */
6407 Assert(iReg < 8);
6408 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6409}
6410
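/*
 * Illustrative sketch, not compiled in: the AH/CH/DH/BH quirk handled by
 * iemGRegRefU8 above.  Register indices 4-7 select the high bytes of
 * RAX/RCX/RDX/RBX when no REX prefix was seen, but SPL/BPL/SIL/DIL when any
 * REX prefix is present.  fPrefixes is poked directly only for the example.
 */
#if 0
static void iemExampleGRegRefU8(PVMCPU pVCpu)
{
    pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX;
    uint8_t *pbAh  = iemGRegRefU8(pVCpu, 4);    /* no REX:   index 4 -> AH  (high byte of RAX) */

    pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX;
    uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);    /* with REX: index 4 -> SPL (low byte of RSP)  */
    RT_NOREF(pbAh, pbSpl);
}
#endif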
6411
6412/**
6413 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6414 *
6415 * @returns Register reference.
6416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6417 * @param iReg The register.
6418 */
6419DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6420{
6421 Assert(iReg < 16);
6422 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6423}
6424
6425
6426/**
6427 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6428 *
6429 * @returns Register reference.
6430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6431 * @param iReg The register.
6432 */
6433DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6434{
6435 Assert(iReg < 16);
6436 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6437}
6438
6439
6440/**
6441 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6442 *
6443 * @returns Register reference.
6444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6445 * @param iReg The register.
6446 */
6447DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6448{
6449 Assert(iReg < 16);
6450 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6451}
6452
6453
6454/**
6455 * Gets a reference (pointer) to the specified segment register's base address.
6456 *
6457 * @returns Segment register base address reference.
6458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6459 * @param iSegReg The segment register.
6460 */
6461DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6462{
6463 Assert(iSegReg < X86_SREG_COUNT);
6464 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6465 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6466}
6467
6468
6469/**
6470 * Fetches the value of an 8-bit general purpose register.
6471 *
6472 * @returns The register value.
6473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6474 * @param iReg The register.
6475 */
6476DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6477{
6478 return *iemGRegRefU8(pVCpu, iReg);
6479}
6480
6481
6482/**
6483 * Fetches the value of a 16-bit general purpose register.
6484 *
6485 * @returns The register value.
6486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6487 * @param iReg The register.
6488 */
6489DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6490{
6491 Assert(iReg < 16);
6492 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6493}
6494
6495
6496/**
6497 * Fetches the value of a 32-bit general purpose register.
6498 *
6499 * @returns The register value.
6500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6501 * @param iReg The register.
6502 */
6503DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6504{
6505 Assert(iReg < 16);
6506 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6507}
6508
6509
6510/**
6511 * Fetches the value of a 64-bit general purpose register.
6512 *
6513 * @returns The register value.
6514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6515 * @param iReg The register.
6516 */
6517DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6518{
6519 Assert(iReg < 16);
6520 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6521}
6522
6523
6524/**
6525 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6526 *
6527 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6528 * segment limit.
6529 *
6530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6531 * @param offNextInstr The offset of the next instruction.
6532 */
6533IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6534{
6535 switch (pVCpu->iem.s.enmEffOpSize)
6536 {
6537 case IEMMODE_16BIT:
6538 {
6539 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6540 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6541 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6542 return iemRaiseGeneralProtectionFault0(pVCpu);
6543 pVCpu->cpum.GstCtx.rip = uNewIp;
6544 break;
6545 }
6546
6547 case IEMMODE_32BIT:
6548 {
6549 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6550 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6551
6552 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6553 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6554 return iemRaiseGeneralProtectionFault0(pVCpu);
6555 pVCpu->cpum.GstCtx.rip = uNewEip;
6556 break;
6557 }
6558
6559 case IEMMODE_64BIT:
6560 {
6561 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6562
6563 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6564 if (!IEM_IS_CANONICAL(uNewRip))
6565 return iemRaiseGeneralProtectionFault0(pVCpu);
6566 pVCpu->cpum.GstCtx.rip = uNewRip;
6567 break;
6568 }
6569
6570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6571 }
6572
6573 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6574
6575#ifndef IEM_WITH_CODE_TLB
6576 /* Flush the prefetch buffer. */
6577 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6578#endif
6579
6580 return VINF_SUCCESS;
6581}
6582
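/*
 * Illustrative sketch, not compiled in: the 16-bit arithmetic performed by
 * iemRegRipRelativeJumpS8 above for a short backward jump.  All numbers are
 * made up for the example.
 */
#if 0
static void iemExampleShortJump16(void)
{
    uint16_t const uIp          = 0x1000;   /* IP of the JMP rel8 instruction */
    uint8_t  const cbInstr      = 2;        /* JMP rel8 is two bytes long     */
    int8_t   const offNextInstr = -0x20;    /* signed 8-bit displacement      */

    /* The target is relative to the next instruction; uint16_t wraps just like IP. */
    uint16_t const uNewIp = (uint16_t)(uIp + cbInstr + offNextInstr);       /* = 0x0fe2 */
    RT_NOREF(uNewIp);                       /* Must not exceed CS.limit or \#GP(0) is raised. */
}
#endif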
6583
6584/**
6585 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6586 *
6587 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6588 * segment limit.
6589 *
6590 * @returns Strict VBox status code.
6591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6592 * @param offNextInstr The offset of the next instruction.
6593 */
6594IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6595{
6596 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6597
6598 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6599 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6600 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6601 return iemRaiseGeneralProtectionFault0(pVCpu);
6602 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6603 pVCpu->cpum.GstCtx.rip = uNewIp;
6604 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6605
6606#ifndef IEM_WITH_CODE_TLB
6607 /* Flush the prefetch buffer. */
6608 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6609#endif
6610
6611 return VINF_SUCCESS;
6612}
6613
6614
6615/**
6616 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6617 *
6618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6619 * segment limit.
6620 *
6621 * @returns Strict VBox status code.
6622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6623 * @param offNextInstr The offset of the next instruction.
6624 */
6625IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6626{
6627 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6628
6629 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6630 {
6631 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6632
6633 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6634 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6635 return iemRaiseGeneralProtectionFault0(pVCpu);
6636 pVCpu->cpum.GstCtx.rip = uNewEip;
6637 }
6638 else
6639 {
6640 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6641
6642 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6643 if (!IEM_IS_CANONICAL(uNewRip))
6644 return iemRaiseGeneralProtectionFault0(pVCpu);
6645 pVCpu->cpum.GstCtx.rip = uNewRip;
6646 }
6647 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6648
6649#ifndef IEM_WITH_CODE_TLB
6650 /* Flush the prefetch buffer. */
6651 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6652#endif
6653
6654 return VINF_SUCCESS;
6655}
6656
6657
6658/**
6659 * Performs a near jump to the specified address.
6660 *
6661 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6662 * segment limit.
6663 *
6664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6665 * @param uNewRip The new RIP value.
6666 */
6667IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6668{
6669 switch (pVCpu->iem.s.enmEffOpSize)
6670 {
6671 case IEMMODE_16BIT:
6672 {
6673 Assert(uNewRip <= UINT16_MAX);
6674 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6675 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6676 return iemRaiseGeneralProtectionFault0(pVCpu);
6677 /** @todo Test 16-bit jump in 64-bit mode. */
6678 pVCpu->cpum.GstCtx.rip = uNewRip;
6679 break;
6680 }
6681
6682 case IEMMODE_32BIT:
6683 {
6684 Assert(uNewRip <= UINT32_MAX);
6685 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6686 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6687
6688 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6689 return iemRaiseGeneralProtectionFault0(pVCpu);
6690 pVCpu->cpum.GstCtx.rip = uNewRip;
6691 break;
6692 }
6693
6694 case IEMMODE_64BIT:
6695 {
6696 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6697
6698 if (!IEM_IS_CANONICAL(uNewRip))
6699 return iemRaiseGeneralProtectionFault0(pVCpu);
6700 pVCpu->cpum.GstCtx.rip = uNewRip;
6701 break;
6702 }
6703
6704 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6705 }
6706
6707 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6708
6709#ifndef IEM_WITH_CODE_TLB
6710 /* Flush the prefetch buffer. */
6711 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6712#endif
6713
6714 return VINF_SUCCESS;
6715}
6716
6717
6718/**
6719 * Get the address of the top of the stack.
6720 *
6721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6722 */
6723DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6724{
6725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6726 return pVCpu->cpum.GstCtx.rsp;
6727 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6728 return pVCpu->cpum.GstCtx.esp;
6729 return pVCpu->cpum.GstCtx.sp;
6730}
6731
6732
6733/**
6734 * Updates the RIP/EIP/IP to point to the next instruction.
6735 *
6736 * This function leaves the EFLAGS.RF flag alone.
6737 *
6738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6739 * @param cbInstr The number of bytes to add.
6740 */
6741IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6742{
6743 switch (pVCpu->iem.s.enmCpuMode)
6744 {
6745 case IEMMODE_16BIT:
6746 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6747 pVCpu->cpum.GstCtx.eip += cbInstr;
6748 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6749 break;
6750
6751 case IEMMODE_32BIT:
6752 pVCpu->cpum.GstCtx.eip += cbInstr;
6753 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6754 break;
6755
6756 case IEMMODE_64BIT:
6757 pVCpu->cpum.GstCtx.rip += cbInstr;
6758 break;
6759 default: AssertFailed();
6760 }
6761}
6762
6763
6764#if 0
6765/**
6766 * Updates the RIP/EIP/IP to point to the next instruction.
6767 *
6768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6769 */
6770IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6771{
6772 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6773}
6774#endif
6775
6776
6777
6778/**
6779 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6780 *
6781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6782 * @param cbInstr The number of bytes to add.
6783 */
6784IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6785{
6786 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6787
6788 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6789#if ARCH_BITS >= 64
6790 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6791 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6792 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6793#else
6794 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6795 pVCpu->cpum.GstCtx.rip += cbInstr;
6796 else
6797 pVCpu->cpum.GstCtx.eip += cbInstr;
6798#endif
6799}
6800
6801
6802/**
6803 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6804 *
6805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6806 */
6807IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6808{
6809 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6810}
6811
6812
6813/**
6814 * Adds to the stack pointer.
6815 *
6816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6817 * @param cbToAdd The number of bytes to add (8-bit!).
6818 */
6819DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6820{
6821 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6822 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6823 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6824 pVCpu->cpum.GstCtx.esp += cbToAdd;
6825 else
6826 pVCpu->cpum.GstCtx.sp += cbToAdd;
6827}
6828
6829
6830/**
6831 * Subtracts from the stack pointer.
6832 *
6833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6834 * @param cbToSub The number of bytes to subtract (8-bit!).
6835 */
6836DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6837{
6838 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6839 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6841 pVCpu->cpum.GstCtx.esp -= cbToSub;
6842 else
6843 pVCpu->cpum.GstCtx.sp -= cbToSub;
6844}
6845
6846
6847/**
6848 * Adds to the temporary stack pointer.
6849 *
6850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6851 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6852 * @param cbToAdd The number of bytes to add (16-bit).
6853 */
6854DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6855{
6856 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6857 pTmpRsp->u += cbToAdd;
6858 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6859 pTmpRsp->DWords.dw0 += cbToAdd;
6860 else
6861 pTmpRsp->Words.w0 += cbToAdd;
6862}
6863
6864
6865/**
6866 * Subtracts from the temporary stack pointer.
6867 *
6868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6869 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6870 * @param cbToSub The number of bytes to subtract.
6871 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6872 * expecting that.
6873 */
6874DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6875{
6876 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6877 pTmpRsp->u -= cbToSub;
6878 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6879 pTmpRsp->DWords.dw0 -= cbToSub;
6880 else
6881 pTmpRsp->Words.w0 -= cbToSub;
6882}
6883
6884
6885/**
6886 * Calculates the effective stack address for a push of the specified size as
6887 * well as the new RSP value (upper bits may be masked).
6888 *
6889 * @returns Effective stack address for the push.
6890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6891 * @param cbItem The size of the stack item to push.
6892 * @param puNewRsp Where to return the new RSP value.
6893 */
6894DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6895{
6896 RTUINT64U uTmpRsp;
6897 RTGCPTR GCPtrTop;
6898 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6899
6900 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6901 GCPtrTop = uTmpRsp.u -= cbItem;
6902 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6903 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6904 else
6905 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6906 *puNewRsp = uTmpRsp.u;
6907 return GCPtrTop;
6908}
6909
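/*
 * Illustrative sketch, not compiled in: what iemRegGetRspForPush above returns
 * for an 8-byte push in 64-bit mode.  The RSP value is made up, and the fields
 * are poked directly only for the example.
 */
#if 0
static void iemExampleRspForPush(PVMCPU pVCpu)
{
    pVCpu->iem.s.enmCpuMode = IEMMODE_64BIT;
    pVCpu->cpum.GstCtx.rsp  = UINT64_C(0x00007fffffffe000);

    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
    /* GCPtrTop == uNewRsp == 0x00007fffffffdff8: the store targets the new,
       lower address, and the caller typically commits uNewRsp to RSP only
       after the memory write has succeeded. */
    RT_NOREF(GCPtrTop);
}
#endif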
6910
6911/**
6912 * Gets the current stack pointer and calculates the value after a pop of the
6913 * specified size.
6914 *
6915 * @returns Current stack pointer.
6916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6917 * @param cbItem The size of the stack item to pop.
6918 * @param puNewRsp Where to return the new RSP value.
6919 */
6920DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6921{
6922 RTUINT64U uTmpRsp;
6923 RTGCPTR GCPtrTop;
6924 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6925
6926 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6927 {
6928 GCPtrTop = uTmpRsp.u;
6929 uTmpRsp.u += cbItem;
6930 }
6931 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6932 {
6933 GCPtrTop = uTmpRsp.DWords.dw0;
6934 uTmpRsp.DWords.dw0 += cbItem;
6935 }
6936 else
6937 {
6938 GCPtrTop = uTmpRsp.Words.w0;
6939 uTmpRsp.Words.w0 += cbItem;
6940 }
6941 *puNewRsp = uTmpRsp.u;
6942 return GCPtrTop;
6943}
6944
6945
6946/**
6947 * Calculates the effective stack address for a push of the specified size as
6948 * well as the new temporary RSP value (upper bits may be masked).
6949 *
6950 * @returns Effective stack address for the push.
6951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6952 * @param pTmpRsp The temporary stack pointer. This is updated.
6953 * @param cbItem The size of the stack item to push.
6954 */
6955DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6956{
6957 RTGCPTR GCPtrTop;
6958
6959 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6960 GCPtrTop = pTmpRsp->u -= cbItem;
6961 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6962 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6963 else
6964 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6965 return GCPtrTop;
6966}
6967
6968
6969/**
6970 * Gets the effective stack address for a pop of the specified size and
6971 * calculates and updates the temporary RSP.
6972 *
6973 * @returns Current stack pointer.
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 * @param pTmpRsp The temporary stack pointer. This is updated.
6976 * @param cbItem The size of the stack item to pop.
6977 */
6978DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6979{
6980 RTGCPTR GCPtrTop;
6981 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6982 {
6983 GCPtrTop = pTmpRsp->u;
6984 pTmpRsp->u += cbItem;
6985 }
6986 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6987 {
6988 GCPtrTop = pTmpRsp->DWords.dw0;
6989 pTmpRsp->DWords.dw0 += cbItem;
6990 }
6991 else
6992 {
6993 GCPtrTop = pTmpRsp->Words.w0;
6994 pTmpRsp->Words.w0 += cbItem;
6995 }
6996 return GCPtrTop;
6997}
6998
6999/** @} */
7000
7001
7002/** @name FPU access and helpers.
7003 *
7004 * @{
7005 */
7006
7007
7008/**
7009 * Hook for preparing to use the host FPU.
7010 *
7011 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7012 *
7013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7014 */
7015DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7016{
7017#ifdef IN_RING3
7018 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7019#else
7020 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7021#endif
7022 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7023}
7024
7025
7026/**
7027 * Hook for preparing to use the host FPU for SSE.
7028 *
7029 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7030 *
7031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7032 */
7033DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7034{
7035 iemFpuPrepareUsage(pVCpu);
7036}
7037
7038
7039/**
7040 * Hook for preparing to use the host FPU for AVX.
7041 *
7042 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7043 *
7044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7045 */
7046DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7047{
7048 iemFpuPrepareUsage(pVCpu);
7049}
7050
7051
7052/**
7053 * Hook for actualizing the guest FPU state before the interpreter reads it.
7054 *
7055 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7056 *
7057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7058 */
7059DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7060{
7061#ifdef IN_RING3
7062 NOREF(pVCpu);
7063#else
7064 CPUMRZFpuStateActualizeForRead(pVCpu);
7065#endif
7066 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7067}
7068
7069
7070/**
7071 * Hook for actualizing the guest FPU state before the interpreter changes it.
7072 *
7073 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7074 *
7075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7076 */
7077DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7078{
7079#ifdef IN_RING3
7080 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7081#else
7082 CPUMRZFpuStateActualizeForChange(pVCpu);
7083#endif
7084 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7085}
7086
7087
7088/**
7089 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7090 * only.
7091 *
7092 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7093 *
7094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7095 */
7096DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7097{
7098#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7099 NOREF(pVCpu);
7100#else
7101 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7102#endif
7103 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7104}
7105
7106
7107/**
7108 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7109 * read+write.
7110 *
7111 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7112 *
7113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7114 */
7115DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7116{
7117#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7118 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7119#else
7120 CPUMRZFpuStateActualizeForChange(pVCpu);
7121#endif
7122 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7123}
7124
7125
7126/**
7127 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7128 * only.
7129 *
7130 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7131 *
7132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7133 */
7134DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7135{
7136#ifdef IN_RING3
7137 NOREF(pVCpu);
7138#else
7139 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7140#endif
7141 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7142}
7143
7144
7145/**
7146 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7147 * read+write.
7148 *
7149 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7150 *
7151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7152 */
7153DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7154{
7155#ifdef IN_RING3
7156 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7157#else
7158 CPUMRZFpuStateActualizeForChange(pVCpu);
7159#endif
7160 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7161}
7162
7163
7164/**
7165 * Stores a QNaN value into a FPU register.
7166 *
7167 * @param pReg Pointer to the register.
7168 */
7169DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7170{
7171 pReg->au32[0] = UINT32_C(0x00000000);
7172 pReg->au32[1] = UINT32_C(0xc0000000);
7173 pReg->au16[4] = UINT16_C(0xffff);
7174}
7175
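/*
 * Illustrative sketch, not compiled in: the bit pattern iemFpuStoreQNan above
 * writes is the x87 "real indefinite" QNaN - sign=1, exponent=0x7fff,
 * mantissa=0xc000000000000000.
 */
#if 0
static void iemExampleQNanPattern(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au16[4] == UINT16_C(0xffff));        /* sign + exponent      */
    Assert(r80.au32[1] == UINT32_C(0xc0000000));    /* mantissa, high dword */
    Assert(r80.au32[0] == UINT32_C(0x00000000));    /* mantissa, low dword  */
}
#endif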
7176
7177/**
7178 * Updates the FOP, FPU.CS and FPUIP registers.
7179 *
7180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7181 * @param pFpuCtx The FPU context.
7182 */
7183DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7184{
7185 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7186 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7187 /** @todo x87.CS and FPUIP need to be kept separately. */
7188 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7189 {
7190 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7191 * happens in real mode here based on the fnsave and fnstenv images. */
7192 pFpuCtx->CS = 0;
7193 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7194 }
7195 else
7196 {
7197 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7198 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7199 }
7200}
7201
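/*
 * Illustrative sketch, not compiled in: the real/V86-mode FPUIP value formed
 * by iemFpuUpdateOpcodeAndIpWorker above, using made-up CS:IP values.  Note
 * that the code ORs the shifted selector into EIP (see the @todo about how
 * real hardware builds the fnsave/fnstenv image).
 */
#if 0
static void iemExampleRealModeFpuIp(void)
{
    uint16_t const uCs    = 0x1234;
    uint32_t const uEip   = 0x0010;
    uint32_t const uFpuIp = uEip | ((uint32_t)uCs << 4);    /* = 0x12350 */
    RT_NOREF(uFpuIp);
}
#endif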
7202
7203/**
7204 * Updates the x87.DS and FPUDP registers.
7205 *
7206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7207 * @param pFpuCtx The FPU context.
7208 * @param iEffSeg The effective segment register.
7209 * @param GCPtrEff The effective address relative to @a iEffSeg.
7210 */
7211DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7212{
7213 RTSEL sel;
7214 switch (iEffSeg)
7215 {
7216 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7217 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7218 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7219 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7220 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7221 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7222 default:
7223 AssertMsgFailed(("%d\n", iEffSeg));
7224 sel = pVCpu->cpum.GstCtx.ds.Sel;
7225 }
7226 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7227 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7228 {
7229 pFpuCtx->DS = 0;
7230 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7231 }
7232 else
7233 {
7234 pFpuCtx->DS = sel;
7235 pFpuCtx->FPUDP = GCPtrEff;
7236 }
7237}
7238
7239
7240/**
7241 * Rotates the stack registers in the push direction.
7242 *
7243 * @param pFpuCtx The FPU context.
7244 * @remarks This is a complete waste of time, but fxsave stores the registers in
7245 * stack order.
7246 */
7247DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7248{
7249 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7250 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7251 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7252 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7253 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7254 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7255 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7256 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7257 pFpuCtx->aRegs[0].r80 = r80Tmp;
7258}
7259
7260
7261/**
7262 * Rotates the stack registers in the pop direction.
7263 *
7264 * @param pFpuCtx The FPU context.
7265 * @remarks This is a complete waste of time, but fxsave stores the registers in
7266 * stack order.
7267 */
7268DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7269{
7270 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7271 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7272 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7273 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7274 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7275 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7276 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7277 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7278 pFpuCtx->aRegs[7].r80 = r80Tmp;
7279}
7280
7281
7282/**
7283 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7284 * exception prevents it.
7285 *
7286 * @param pResult The FPU operation result to push.
7287 * @param pFpuCtx The FPU context.
7288 */
7289IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7290{
7291 /* Update FSW and bail if there are pending exceptions afterwards. */
7292 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7293 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7294 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7295 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7296 {
7297 pFpuCtx->FSW = fFsw;
7298 return;
7299 }
7300
7301 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7302 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7303 {
7304 /* All is fine, push the actual value. */
7305 pFpuCtx->FTW |= RT_BIT(iNewTop);
7306 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7307 }
7308 else if (pFpuCtx->FCW & X86_FCW_IM)
7309 {
7310 /* Masked stack overflow, push QNaN. */
7311 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7312 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7313 }
7314 else
7315 {
7316 /* Raise stack overflow, don't push anything. */
7317 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7318 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7319 return;
7320 }
7321
7322 fFsw &= ~X86_FSW_TOP_MASK;
7323 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7324 pFpuCtx->FSW = fFsw;
7325
7326 iemFpuRotateStackPush(pFpuCtx);
7327}
7328
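/*
 * Illustrative sketch, not compiled in: the TOP arithmetic used by
 * iemFpuMaybePushResult above.  TOP is a 3-bit field, so adding 7 modulo 8 is
 * the same as decrementing it, which is what a push does.
 */
#if 0
static void iemExampleFpuPushTop(void)
{
    uint16_t const fFsw    = (uint16_t)(3 << X86_FSW_TOP_SHIFT);            /* current TOP = 3 */
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    Assert(iNewTop == 2);                                                   /* (3 + 7) & 7 = 2, i.e. TOP - 1 */
}
#endif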
7329
7330/**
7331 * Stores a result in a FPU register and updates the FSW and FTW.
7332 *
7333 * @param pFpuCtx The FPU context.
7334 * @param pResult The result to store.
7335 * @param iStReg Which FPU register to store it in.
7336 */
7337IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7338{
7339 Assert(iStReg < 8);
7340 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7341 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7342 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7343 pFpuCtx->FTW |= RT_BIT(iReg);
7344 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7345}
7346
7347
7348/**
7349 * Only updates the FPU status word (FSW) with the result of the current
7350 * instruction.
7351 *
7352 * @param pFpuCtx The FPU context.
7353 * @param u16FSW The FSW output of the current instruction.
7354 */
7355IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7356{
7357 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7358 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7359}
7360
7361
7362/**
7363 * Pops one item off the FPU stack if no pending exception prevents it.
7364 *
7365 * @param pFpuCtx The FPU context.
7366 */
7367IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7368{
7369 /* Check pending exceptions. */
7370 uint16_t uFSW = pFpuCtx->FSW;
7371 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7372 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7373 return;
7374
7375 /* TOP--. */
7376 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7377 uFSW &= ~X86_FSW_TOP_MASK;
7378 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7379 pFpuCtx->FSW = uFSW;
7380
7381 /* Mark the previous ST0 as empty. */
7382 iOldTop >>= X86_FSW_TOP_SHIFT;
7383 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7384
7385 /* Rotate the registers. */
7386 iemFpuRotateStackPop(pFpuCtx);
7387}
7388
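/*
 * Illustrative sketch, not compiled in: the TOP arithmetic used by
 * iemFpuMaybePopOne above.  Adding 9 (i.e. 8 + 1) and masking keeps only the
 * 3-bit TOP field, so the net effect is TOP + 1 modulo 8.
 */
#if 0
static void iemExampleFpuPopTop(void)
{
    uint16_t       uFsw    = (uint16_t)(7 << X86_FSW_TOP_SHIFT);            /* current TOP = 7 */
    uint16_t const iOldTop = uFsw & X86_FSW_TOP_MASK;
    uFsw &= ~X86_FSW_TOP_MASK;
    uFsw |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    Assert(X86_FSW_TOP_GET(uFsw) == 0);                                     /* 7 + 1 wraps to 0 */
}
#endif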
7389
7390/**
7391 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7392 *
7393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7394 * @param pResult The FPU operation result to push.
7395 */
7396IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7397{
7398 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7399 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7400 iemFpuMaybePushResult(pResult, pFpuCtx);
7401}
7402
7403
7404/**
7405 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7406 * and sets FPUDP and FPUDS.
7407 *
7408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7409 * @param pResult The FPU operation result to push.
7410 * @param iEffSeg The effective segment register.
7411 * @param GCPtrEff The effective address relative to @a iEffSeg.
7412 */
7413IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7414{
7415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7416 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7417 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7418 iemFpuMaybePushResult(pResult, pFpuCtx);
7419}
7420
7421
7422/**
7423 * Replace ST0 with the first value and push the second onto the FPU stack,
7424 * unless a pending exception prevents it.
7425 *
7426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7427 * @param pResult The FPU operation result to store and push.
7428 */
7429IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7430{
7431 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7432 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7433
7434 /* Update FSW and bail if there are pending exceptions afterwards. */
7435 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7436 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7437 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7438 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7439 {
7440 pFpuCtx->FSW = fFsw;
7441 return;
7442 }
7443
7444 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7445 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7446 {
7447 /* All is fine, push the actual value. */
7448 pFpuCtx->FTW |= RT_BIT(iNewTop);
7449 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7450 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7451 }
7452 else if (pFpuCtx->FCW & X86_FCW_IM)
7453 {
7454 /* Masked stack overflow, push QNaN. */
7455 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7456 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7457 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7458 }
7459 else
7460 {
7461 /* Raise stack overflow, don't push anything. */
7462 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7463 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7464 return;
7465 }
7466
7467 fFsw &= ~X86_FSW_TOP_MASK;
7468 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7469 pFpuCtx->FSW = fFsw;
7470
7471 iemFpuRotateStackPush(pFpuCtx);
7472}
7473
7474
7475/**
7476 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7477 * FOP.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param pResult The result to store.
7481 * @param iStReg Which FPU register to store it in.
7482 */
7483IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7484{
7485 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7486 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7487 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7488}
7489
7490
7491/**
7492 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7493 * FOP, and then pops the stack.
7494 *
7495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7496 * @param pResult The result to store.
7497 * @param iStReg Which FPU register to store it in.
7498 */
7499IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7500{
7501 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7502 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7503 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7504 iemFpuMaybePopOne(pFpuCtx);
7505}
7506
7507
7508/**
7509 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7510 * FPUDP, and FPUDS.
7511 *
7512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7513 * @param pResult The result to store.
7514 * @param iStReg Which FPU register to store it in.
7515 * @param iEffSeg The effective memory operand selector register.
7516 * @param GCPtrEff The effective memory operand offset.
7517 */
7518IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7519 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7520{
7521 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7524 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7525}
7526
7527
7528/**
7529 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7530 * FPUDP, and FPUDS, and then pops the stack.
7531 *
7532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7533 * @param pResult The result to store.
7534 * @param iStReg Which FPU register to store it in.
7535 * @param iEffSeg The effective memory operand selector register.
7536 * @param GCPtrEff The effective memory operand offset.
7537 */
7538IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7539 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7540{
7541 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7542 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7543 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7544 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7545 iemFpuMaybePopOne(pFpuCtx);
7546}
7547
7548
7549/**
7550 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 */
7554IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7555{
7556 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7557 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7558}
7559
7560
7561/**
7562 * Marks the specified stack register as free (for FFREE).
7563 *
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param iStReg The register to free.
7566 */
7567IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7568{
7569 Assert(iStReg < 8);
7570 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7571 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7572 pFpuCtx->FTW &= ~RT_BIT(iReg);
7573}
7574
7575
7576/**
7577 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7578 *
7579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7580 */
7581IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7582{
7583 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7584 uint16_t uFsw = pFpuCtx->FSW;
7585 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7586 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7587 uFsw &= ~X86_FSW_TOP_MASK;
7588 uFsw |= uTop;
7589 pFpuCtx->FSW = uFsw;
7590}
7591
7592
7593/**
7594 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7595 *
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 */
7598IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7599{
7600 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7601 uint16_t uFsw = pFpuCtx->FSW;
7602 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7603 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7604 uFsw &= ~X86_FSW_TOP_MASK;
7605 uFsw |= uTop;
7606 pFpuCtx->FSW = uFsw;
7607}
7608
7609
7610/**
7611 * Updates the FSW, FOP, FPUIP, and FPUCS.
7612 *
7613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7614 * @param u16FSW The FSW from the current instruction.
7615 */
7616IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7617{
7618 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7619 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7620 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7621}
7622
7623
7624/**
7625 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7626 *
7627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7628 * @param u16FSW The FSW from the current instruction.
7629 */
7630IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7631{
7632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7634 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7635 iemFpuMaybePopOne(pFpuCtx);
7636}
7637
7638
7639/**
7640 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7641 *
7642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7643 * @param u16FSW The FSW from the current instruction.
7644 * @param iEffSeg The effective memory operand selector register.
7645 * @param GCPtrEff The effective memory operand offset.
7646 */
7647IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7648{
7649 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7650 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7652 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7653}
7654
7655
7656/**
7657 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7658 *
7659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7660 * @param u16FSW The FSW from the current instruction.
7661 */
7662IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7663{
7664 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7665 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7666 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7667 iemFpuMaybePopOne(pFpuCtx);
7668 iemFpuMaybePopOne(pFpuCtx);
7669}
7670
7671
7672/**
7673 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7674 *
7675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7676 * @param u16FSW The FSW from the current instruction.
7677 * @param iEffSeg The effective memory operand selector register.
7678 * @param GCPtrEff The effective memory operand offset.
7679 */
7680IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7681{
7682 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7683 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7686 iemFpuMaybePopOne(pFpuCtx);
7687}
7688
7689
7690/**
7691 * Worker routine for raising an FPU stack underflow exception.
7692 *
7693 * @param pFpuCtx The FPU context.
7694 * @param iStReg The stack register being accessed.
7695 */
7696IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7697{
7698 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7699 if (pFpuCtx->FCW & X86_FCW_IM)
7700 {
7701 /* Masked underflow. */
7702 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7703 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7704 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7705 if (iStReg != UINT8_MAX)
7706 {
7707 pFpuCtx->FTW |= RT_BIT(iReg);
7708 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7709 }
7710 }
7711 else
7712 {
7713 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7714 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7715 }
7716}
7717
7718
7719/**
7720 * Raises an FPU stack underflow exception.
7721 *
7722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7723 * @param iStReg The destination register that should be loaded
7724 * with QNaN if \#IS is masked. Specify
7725 * UINT8_MAX if none (like for fcom).
7726 */
7727DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7728{
7729 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7730 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7731 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7732}
7733
7734
7735DECL_NO_INLINE(IEM_STATIC, void)
7736iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7737{
7738 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7739 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7740 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7741 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7742}
7743
7744
7745DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7746{
7747 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7748 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7749 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7750 iemFpuMaybePopOne(pFpuCtx);
7751}
7752
7753
7754DECL_NO_INLINE(IEM_STATIC, void)
7755iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7756{
7757 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7758 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7759 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7760 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7761 iemFpuMaybePopOne(pFpuCtx);
7762}
7763
7764
7765DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7766{
7767 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7768 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7769 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7770 iemFpuMaybePopOne(pFpuCtx);
7771 iemFpuMaybePopOne(pFpuCtx);
7772}
7773
7774
7775DECL_NO_INLINE(IEM_STATIC, void)
7776iemFpuStackPushUnderflow(PVMCPU pVCpu)
7777{
7778 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7779 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7780
7781 if (pFpuCtx->FCW & X86_FCW_IM)
7782 {
7783 /* Masked underflow - Push QNaN. */
7784 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7785 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7786 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7787 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7788 pFpuCtx->FTW |= RT_BIT(iNewTop);
7789 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7790 iemFpuRotateStackPush(pFpuCtx);
7791 }
7792 else
7793 {
7794 /* Exception pending - don't change TOP or the register stack. */
7795 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7796 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7797 }
7798}
7799
7800
7801DECL_NO_INLINE(IEM_STATIC, void)
7802iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7803{
7804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7805 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7806
7807 if (pFpuCtx->FCW & X86_FCW_IM)
7808 {
7809 /* Masked underflow - Push QNaN. */
7810 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7811 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7812 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7813 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7814 pFpuCtx->FTW |= RT_BIT(iNewTop);
7815 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7816 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7817 iemFpuRotateStackPush(pFpuCtx);
7818 }
7819 else
7820 {
7821 /* Exception pending - don't change TOP or the register stack. */
7822 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7823 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7824 }
7825}
7826
7827
7828/**
7829 * Worker routine for raising an FPU stack overflow exception on a push.
7830 *
7831 * @param pFpuCtx The FPU context.
7832 */
7833IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7834{
7835 if (pFpuCtx->FCW & X86_FCW_IM)
7836 {
7837 /* Masked overflow. */
7838 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7839 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7840 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7841 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7842 pFpuCtx->FTW |= RT_BIT(iNewTop);
7843 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7844 iemFpuRotateStackPush(pFpuCtx);
7845 }
7846 else
7847 {
7848 /* Exception pending - don't change TOP or the register stack. */
7849 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7850 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7851 }
7852}
7853
7854
7855/**
7856 * Raises an FPU stack overflow exception on a push.
7857 *
7858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7859 */
7860DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7861{
7862 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7863 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7864 iemFpuStackPushOverflowOnly(pFpuCtx);
7865}
7866
7867
7868/**
7869 * Raises an FPU stack overflow exception on a push with a memory operand.
7870 *
7871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7872 * @param iEffSeg The effective memory operand selector register.
7873 * @param GCPtrEff The effective memory operand offset.
7874 */
7875DECL_NO_INLINE(IEM_STATIC, void)
7876iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7877{
7878 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7879 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7881 iemFpuStackPushOverflowOnly(pFpuCtx);
7882}
7883
7884
7885IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7886{
7887 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7888 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7889 if (pFpuCtx->FTW & RT_BIT(iReg))
7890 return VINF_SUCCESS;
7891 return VERR_NOT_FOUND;
7892}
7893
7894
7895IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7896{
7897 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7898 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7899 if (pFpuCtx->FTW & RT_BIT(iReg))
7900 {
7901 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7902 return VINF_SUCCESS;
7903 }
7904 return VERR_NOT_FOUND;
7905}
7906
7907
7908IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7909 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7910{
7911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7912 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7913 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7914 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7915 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7916 {
7917 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7918 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7919 return VINF_SUCCESS;
7920 }
7921 return VERR_NOT_FOUND;
7922}
7923
7924
7925IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7926{
7927 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7928 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7929 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7930 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7931 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7932 {
7933 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7934 return VINF_SUCCESS;
7935 }
7936 return VERR_NOT_FOUND;
7937}
7938
7939
7940/**
7941 * Updates the FPU exception status after FCW is changed.
7942 *
7943 * @param pFpuCtx The FPU context.
7944 */
7945IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7946{
7947 uint16_t u16Fsw = pFpuCtx->FSW;
7948 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7949 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7950 else
7951 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7952 pFpuCtx->FSW = u16Fsw;
7953}
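

/*
 * Minimal usage sketch of the helper above (kept out of the build with #if 0):
 * how an FLDCW-style mask change interacts with a pending exception.  The
 * values are made up; it assumes the usual X86_FCW_ZM / X86_FSW_ZE bit
 * definitions and the FINIT default control word 0x037f.
 */
#if 0
static void iemFpuRecalcExceptionStatusExample(PX86FXSTATE pFpuCtx)
{
    pFpuCtx->FCW = UINT16_C(0x037f) & ~X86_FCW_ZM;   /* everything masked except #Z */
    pFpuCtx->FSW = X86_FSW_ZE;                       /* a divide-by-zero is pending */
    iemFpuRecalcExceptionStatus(pFpuCtx);
    Assert(pFpuCtx->FSW & (X86_FSW_ES | X86_FSW_B)); /* unmasked + pending -> summary & busy set */

    pFpuCtx->FCW |= X86_FCW_ZM;                      /* the guest masks #Z again */
    iemFpuRecalcExceptionStatus(pFpuCtx);
    Assert(!(pFpuCtx->FSW & (X86_FSW_ES | X86_FSW_B)));
}
#endif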
7954
7955
7956/**
7957 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7958 *
7959 * @returns The full FTW.
7960 * @param pFpuCtx The FPU context.
7961 */
7962IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7963{
7964 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7965 uint16_t u16Ftw = 0;
7966 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7967 for (unsigned iSt = 0; iSt < 8; iSt++)
7968 {
7969 unsigned const iReg = (iSt + iTop) & 7;
7970 if (!(u8Ftw & RT_BIT(iReg)))
7971 u16Ftw |= 3 << (iReg * 2); /* empty */
7972 else
7973 {
7974 uint16_t uTag;
7975 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7976 if (pr80Reg->s.uExponent == 0x7fff)
7977 uTag = 2; /* Exponent is all 1's => Special. */
7978 else if (pr80Reg->s.uExponent == 0x0000)
7979 {
7980 if (pr80Reg->s.u64Mantissa == 0x0000)
7981 uTag = 1; /* All bits are zero => Zero. */
7982 else
7983 uTag = 2; /* Must be special. */
7984 }
7985 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7986 uTag = 0; /* Valid. */
7987 else
7988 uTag = 2; /* Must be special. */
7989
7990 u16Ftw |= uTag << (iReg * 2);
7991 }
7992 }
7993
7994 return u16Ftw;
7995}
7996
7997
7998/**
7999 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
8000 *
8001 * @returns The compressed FTW.
8002 * @param u16FullFtw The full FTW to convert.
8003 */
8004IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
8005{
8006 uint8_t u8Ftw = 0;
8007 for (unsigned i = 0; i < 8; i++)
8008 {
8009 if ((u16FullFtw & 3) != 3 /*empty*/)
8010 u8Ftw |= RT_BIT(i);
8011 u16FullFtw >>= 2;
8012 }
8013
8014 return u8Ftw;
8015}
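

/*
 * Illustrative sketch only (kept out of the build): how the two tag word
 * helpers above relate.  The example value is made up; the per-register
 * encoding is 00=valid, 01=zero, 10=special, 11=empty as produced by
 * iemFpuCalcFullFtw.
 */
#if 0
static void iemFpuFtwExample(void)
{
    /* Full FTW: reg0 valid (00), reg1 zero (01), reg2 special (10), regs 3-7 empty (11). */
    uint16_t const u16FullFtw = UINT16_C(0xffe4);
    uint8_t  const u8Ftw      = iemFpuCompressFtw(u16FullFtw);
    /* Only the three non-empty registers survive in the compressed (one bit per register) form. */
    Assert(u8Ftw == 0x07);
}
#endif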
8016
8017/** @} */
8018
8019
8020/** @name Memory access.
8021 *
8022 * @{
8023 */
8024
8025
8026/**
8027 * Updates the IEMCPU::cbWritten counter if applicable.
8028 *
8029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8030 * @param fAccess The access being accounted for.
8031 * @param cbMem The access size.
8032 */
8033DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8034{
8035 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8036 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8037 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8038}
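

/*
 * Illustrative sketch (not compiled): only data and stack writes bump the
 * counter above; reads are ignored.  The example helper name is made up.
 */
#if 0
static void iemMemUpdateWrittenCounterExample(PVMCPU pVCpu)
{
    uint32_t const cbBefore = pVCpu->iem.s.cbWritten;
    iemMemUpdateWrittenCounter(pVCpu, IEM_ACCESS_DATA_R, 4); /* data read: not accounted */
    Assert(pVCpu->iem.s.cbWritten == cbBefore);
    iemMemUpdateWrittenCounter(pVCpu, IEM_ACCESS_DATA_W, 4); /* data write: accounted */
    Assert(pVCpu->iem.s.cbWritten == cbBefore + 4);
}
#endif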
8039
8040
8041/**
8042 * Checks if the given segment can be written to, raising the appropriate
8043 * exception if not.
8044 *
8045 * @returns VBox strict status code.
8046 *
8047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8048 * @param pHid Pointer to the hidden register.
8049 * @param iSegReg The register number.
8050 * @param pu64BaseAddr Where to return the base address to use for the
8051 * segment. (In 64-bit code it may differ from the
8052 * base in the hidden segment.)
8053 */
8054IEM_STATIC VBOXSTRICTRC
8055iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8056{
8057 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8058
8059 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8060 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8061 else
8062 {
8063 if (!pHid->Attr.n.u1Present)
8064 {
8065 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8066 AssertRelease(uSel == 0);
8067 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8068 return iemRaiseGeneralProtectionFault0(pVCpu);
8069 }
8070
8071 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8072 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8073 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8074 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8075 *pu64BaseAddr = pHid->u64Base;
8076 }
8077 return VINF_SUCCESS;
8078}
8079
8080
8081/**
8082 * Checks if the given segment can be read from, raising the appropriate
8083 * exception if not.
8084 *
8085 * @returns VBox strict status code.
8086 *
8087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8088 * @param pHid Pointer to the hidden register.
8089 * @param iSegReg The register number.
8090 * @param pu64BaseAddr Where to return the base address to use for the
8091 * segment. (In 64-bit code it may differ from the
8092 * base in the hidden segment.)
8093 */
8094IEM_STATIC VBOXSTRICTRC
8095iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8096{
8097 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8098
8099 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8100 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8101 else
8102 {
8103 if (!pHid->Attr.n.u1Present)
8104 {
8105 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8106 AssertRelease(uSel == 0);
8107 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8108 return iemRaiseGeneralProtectionFault0(pVCpu);
8109 }
8110
8111 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8112 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8113 *pu64BaseAddr = pHid->u64Base;
8114 }
8115 return VINF_SUCCESS;
8116}
8117
8118
8119/**
8120 * Applies the segment limit, base and attributes.
8121 *
8122 * This may raise a \#GP or \#SS.
8123 *
8124 * @returns VBox strict status code.
8125 *
8126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8127 * @param fAccess The kind of access which is being performed.
8128 * @param iSegReg The index of the segment register to apply.
8129 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8130 * TSS, ++).
8131 * @param cbMem The access size.
8132 * @param pGCPtrMem Pointer to the guest memory address to apply
8133 * segmentation to. Input and output parameter.
8134 */
8135IEM_STATIC VBOXSTRICTRC
8136iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8137{
8138 if (iSegReg == UINT8_MAX)
8139 return VINF_SUCCESS;
8140
8141 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8142 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8143 switch (pVCpu->iem.s.enmCpuMode)
8144 {
8145 case IEMMODE_16BIT:
8146 case IEMMODE_32BIT:
8147 {
8148 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8149 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8150
8151 if ( pSel->Attr.n.u1Present
8152 && !pSel->Attr.n.u1Unusable)
8153 {
8154 Assert(pSel->Attr.n.u1DescType);
8155 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8156 {
8157 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8158 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8159 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8160
8161 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8162 {
8163 /** @todo CPL check. */
8164 }
8165
8166 /*
8167 * There are two kinds of data selectors, normal and expand down.
8168 */
8169 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8170 {
8171 if ( GCPtrFirst32 > pSel->u32Limit
8172 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8173 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8174 }
8175 else
8176 {
8177 /*
8178 * The upper boundary is defined by the B bit, not the G bit!
8179 */
8180 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8181 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8182 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8183 }
8184 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8185 }
8186 else
8187 {
8188
8189 /*
8190 * A code selector can usually be used to read through; writing is
8191 * only permitted in real and V8086 mode.
8192 */
8193 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8194 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8195 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8196 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8197 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8198
8199 if ( GCPtrFirst32 > pSel->u32Limit
8200 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8201 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8202
8203 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8204 {
8205 /** @todo CPL check. */
8206 }
8207
8208 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8209 }
8210 }
8211 else
8212 return iemRaiseGeneralProtectionFault0(pVCpu);
8213 return VINF_SUCCESS;
8214 }
8215
8216 case IEMMODE_64BIT:
8217 {
8218 RTGCPTR GCPtrMem = *pGCPtrMem;
8219 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8220 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8221
8222 Assert(cbMem >= 1);
8223 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8224 return VINF_SUCCESS;
8225 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8226 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8227 return iemRaiseGeneralProtectionFault0(pVCpu);
8228 }
8229
8230 default:
8231 AssertFailedReturn(VERR_IEM_IPE_7);
8232 }
8233}
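

/*
 * Worked example (not compiled) for the expand-down case handled above, with
 * made-up numbers: for a big (B=1) expand-down data segment with limit 0x1000,
 * the valid offsets are 0x1001..0xffffffff, i.e. the limit excludes the low
 * part of the address space rather than capping it.
 */
#if 0
static void iemMemExpandDownLimitExample(void)
{
    uint32_t const uLimit     = UINT32_C(0x1000);
    uint32_t const GCPtrBelow = UINT32_C(0x0800); /* <  uLimit + 1 -> iemRaiseSelectorBounds */
    uint32_t const GCPtrAbove = UINT32_C(0x2000); /* >= uLimit + 1 -> passes the lower bound check */
    Assert(GCPtrBelow <  uLimit + UINT32_C(1));
    Assert(GCPtrAbove >= uLimit + UINT32_C(1));
}
#endif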
8234
8235
8236/**
8237 * Translates a virtual address to a physical address and checks if we
8238 * can access the page as specified.
8239 *
8240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8241 * @param GCPtrMem The virtual address.
8242 * @param fAccess The intended access.
8243 * @param pGCPhysMem Where to return the physical address.
8244 */
8245IEM_STATIC VBOXSTRICTRC
8246iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8247{
8248 /** @todo Need a different PGM interface here. We're currently using
8249 * generic / REM interfaces. this won't cut it for R0 & RC. */
8250 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8251 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8252 RTGCPHYS GCPhys;
8253 uint64_t fFlags;
8254 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8255 if (RT_FAILURE(rc))
8256 {
8257 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8258 /** @todo Check unassigned memory in unpaged mode. */
8259 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8260 *pGCPhysMem = NIL_RTGCPHYS;
8261 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8262 }
8263
8264 /* If the page is writable and does not have the no-exec bit set, all
8265 access is allowed. Otherwise we'll have to check more carefully... */
8266 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8267 {
8268 /* Write to read only memory? */
8269 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8270 && !(fFlags & X86_PTE_RW)
8271 && ( (pVCpu->iem.s.uCpl == 3
8272 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8273 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8274 {
8275 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8276 *pGCPhysMem = NIL_RTGCPHYS;
8277 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8278 }
8279
8280 /* Kernel memory accessed by userland? */
8281 if ( !(fFlags & X86_PTE_US)
8282 && pVCpu->iem.s.uCpl == 3
8283 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8284 {
8285 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8286 *pGCPhysMem = NIL_RTGCPHYS;
8287 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8288 }
8289
8290 /* Executing non-executable memory? */
8291 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8292 && (fFlags & X86_PTE_PAE_NX)
8293 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8294 {
8295 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8296 *pGCPhysMem = NIL_RTGCPHYS;
8297 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8298 VERR_ACCESS_DENIED);
8299 }
8300 }
8301
8302 /*
8303 * Set the dirty / access flags.
8304 * ASSUMES this is set when the address is translated rather than on commit...
8305 */
8306 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8307 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8308 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8309 {
8310 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8311 AssertRC(rc2);
8312 }
8313
8314 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8315 *pGCPhysMem = GCPhys;
8316 return VINF_SUCCESS;
8317}
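

/*
 * Illustrative sketch (not compiled): the page-table flag combination that
 * takes the fast path above.  A present, user-writable page without NX needs
 * none of the detailed checks; anything else drops into the careful branch.
 * The example uses the standard X86_PTE_* bits and a made-up helper name.
 */
#if 0
static void iemMemPageFastPathExample(void)
{
    uint64_t const fFlagsRw = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_A | X86_PTE_D;
    Assert((fFlagsRw & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US));

    uint64_t const fFlagsRo = X86_PTE_P | X86_PTE_A; /* read-only supervisor page */
    Assert((fFlagsRo & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US));
}
#endif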
8318
8319
8320
8321/**
8322 * Maps a physical page.
8323 *
8324 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8326 * @param GCPhysMem The physical address.
8327 * @param fAccess The intended access.
8328 * @param ppvMem Where to return the mapping address.
8329 * @param pLock The PGM lock.
8330 */
8331IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8332{
8333#ifdef IEM_LOG_MEMORY_WRITES
8334 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8335 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8336#endif
8337
8338 /** @todo This API may require some improving later. A private deal with PGM
8339 * regarding locking and unlocking needs to be struck. A couple of TLBs
8340 * living in PGM, but with publicly accessible inlined access methods
8341 * could perhaps be an even better solution. */
8342 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8343 GCPhysMem,
8344 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8345 pVCpu->iem.s.fBypassHandlers,
8346 ppvMem,
8347 pLock);
8348 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8349 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8350
8351 return rc;
8352}
8353
8354
8355/**
8356 * Unmaps a page previously mapped by iemMemPageMap.
8357 *
8358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8359 * @param GCPhysMem The physical address.
8360 * @param fAccess The intended access.
8361 * @param pvMem What iemMemPageMap returned.
8362 * @param pLock The PGM lock.
8363 */
8364DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8365{
8366 NOREF(pVCpu);
8367 NOREF(GCPhysMem);
8368 NOREF(fAccess);
8369 NOREF(pvMem);
8370 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8371}
8372
8373
8374/**
8375 * Looks up a memory mapping entry.
8376 *
8377 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param pvMem The memory address.
8380 * @param fAccess The kind of access.
8381 */
8382DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8383{
8384 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8385 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8386 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8387 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8388 return 0;
8389 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8390 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8391 return 1;
8392 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8393 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8394 return 2;
8395 return VERR_NOT_FOUND;
8396}
8397
8398
8399/**
8400 * Finds a free memmap entry when using iNextMapping doesn't work.
8401 *
8402 * @returns Memory mapping index, 1024 on failure.
8403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8404 */
8405IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8406{
8407 /*
8408 * The easy case.
8409 */
8410 if (pVCpu->iem.s.cActiveMappings == 0)
8411 {
8412 pVCpu->iem.s.iNextMapping = 1;
8413 return 0;
8414 }
8415
8416 /* There should be enough mappings for all instructions. */
8417 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8418
8419 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8420 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8421 return i;
8422
8423 AssertFailedReturn(1024);
8424}
8425
8426
8427/**
8428 * Commits a bounce buffer that needs writing back and unmaps it.
8429 *
8430 * @returns Strict VBox status code.
8431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8432 * @param iMemMap The index of the buffer to commit.
8433 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8434 * Always false in ring-3, obviously.
8435 */
8436IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8437{
8438 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8439 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8440#ifdef IN_RING3
8441 Assert(!fPostponeFail);
8442 RT_NOREF_PV(fPostponeFail);
8443#endif
8444
8445 /*
8446 * Do the writing.
8447 */
8448 PVM pVM = pVCpu->CTX_SUFF(pVM);
8449 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8450 {
8451 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8452 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8453 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8454 if (!pVCpu->iem.s.fBypassHandlers)
8455 {
8456 /*
8457 * Carefully and efficiently dealing with access handler return
8458 * codes makes this a little bloated.
8459 */
8460 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8462 pbBuf,
8463 cbFirst,
8464 PGMACCESSORIGIN_IEM);
8465 if (rcStrict == VINF_SUCCESS)
8466 {
8467 if (cbSecond)
8468 {
8469 rcStrict = PGMPhysWrite(pVM,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8471 pbBuf + cbFirst,
8472 cbSecond,
8473 PGMACCESSORIGIN_IEM);
8474 if (rcStrict == VINF_SUCCESS)
8475 { /* nothing */ }
8476 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8477 {
8478 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8481 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8482 }
8483#ifndef IN_RING3
8484 else if (fPostponeFail)
8485 {
8486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8489 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8490 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8491 return iemSetPassUpStatus(pVCpu, rcStrict);
8492 }
8493#endif
8494 else
8495 {
8496 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8497 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8498 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8499 return rcStrict;
8500 }
8501 }
8502 }
8503 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8504 {
8505 if (!cbSecond)
8506 {
8507 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8508 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8509 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8510 }
8511 else
8512 {
8513 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8515 pbBuf + cbFirst,
8516 cbSecond,
8517 PGMACCESSORIGIN_IEM);
8518 if (rcStrict2 == VINF_SUCCESS)
8519 {
8520 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8524 }
8525 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8526 {
8527 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8528 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8530 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8531 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8532 }
8533#ifndef IN_RING3
8534 else if (fPostponeFail)
8535 {
8536 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8539 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8540 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8541 return iemSetPassUpStatus(pVCpu, rcStrict);
8542 }
8543#endif
8544 else
8545 {
8546 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8549 return rcStrict2;
8550 }
8551 }
8552 }
8553#ifndef IN_RING3
8554 else if (fPostponeFail)
8555 {
8556 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8557 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8558 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8559 if (!cbSecond)
8560 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8561 else
8562 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8563 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8564 return iemSetPassUpStatus(pVCpu, rcStrict);
8565 }
8566#endif
8567 else
8568 {
8569 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8570 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8571 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8572 return rcStrict;
8573 }
8574 }
8575 else
8576 {
8577 /*
8578 * No access handlers, much simpler.
8579 */
8580 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8581 if (RT_SUCCESS(rc))
8582 {
8583 if (cbSecond)
8584 {
8585 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8586 if (RT_SUCCESS(rc))
8587 { /* likely */ }
8588 else
8589 {
8590 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8591 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8592 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8593 return rc;
8594 }
8595 }
8596 }
8597 else
8598 {
8599 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8600 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8601 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8602 return rc;
8603 }
8604 }
8605 }
8606
8607#if defined(IEM_LOG_MEMORY_WRITES)
8608 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8609 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8610 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8611 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8612 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8613 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8614
8615 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8616 g_cbIemWrote = cbWrote;
8617 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8618#endif
8619
8620 /*
8621 * Free the mapping entry.
8622 */
8623 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8624 Assert(pVCpu->iem.s.cActiveMappings != 0);
8625 pVCpu->iem.s.cActiveMappings--;
8626 return VINF_SUCCESS;
8627}
8628
8629
8630/**
8631 * iemMemMap worker that deals with a request crossing pages.
8632 */
8633IEM_STATIC VBOXSTRICTRC
8634iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8635{
8636 /*
8637 * Do the address translations.
8638 */
8639 RTGCPHYS GCPhysFirst;
8640 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8641 if (rcStrict != VINF_SUCCESS)
8642 return rcStrict;
8643
8644 RTGCPHYS GCPhysSecond;
8645 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8646 fAccess, &GCPhysSecond);
8647 if (rcStrict != VINF_SUCCESS)
8648 return rcStrict;
8649 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8650
8651 PVM pVM = pVCpu->CTX_SUFF(pVM);
8652
8653 /*
8654 * Read in the current memory content if it's a read, execute or partial
8655 * write access.
8656 */
8657 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8658 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8659 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8660
8661 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8662 {
8663 if (!pVCpu->iem.s.fBypassHandlers)
8664 {
8665 /*
8666 * Must carefully deal with access handler status codes here, which
8667 * makes the code a bit bloated.
8668 */
8669 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8670 if (rcStrict == VINF_SUCCESS)
8671 {
8672 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8673 if (rcStrict == VINF_SUCCESS)
8674 { /*likely */ }
8675 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8676 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8677 else
8678 {
8679 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8680 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8681 return rcStrict;
8682 }
8683 }
8684 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8685 {
8686 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8687 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8688 {
8689 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8690 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8691 }
8692 else
8693 {
8694 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8695 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8696 return rcStrict2;
8697 }
8698 }
8699 else
8700 {
8701 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8702 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8703 return rcStrict;
8704 }
8705 }
8706 else
8707 {
8708 /*
8709 * No informational status codes here, much more straightforward.
8710 */
8711 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8712 if (RT_SUCCESS(rc))
8713 {
8714 Assert(rc == VINF_SUCCESS);
8715 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8716 if (RT_SUCCESS(rc))
8717 Assert(rc == VINF_SUCCESS);
8718 else
8719 {
8720 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8721 return rc;
8722 }
8723 }
8724 else
8725 {
8726 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8727 return rc;
8728 }
8729 }
8730 }
8731#ifdef VBOX_STRICT
8732 else
8733 memset(pbBuf, 0xcc, cbMem);
8734 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8735 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8736#endif
8737
8738 /*
8739 * Commit the bounce buffer entry.
8740 */
8741 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8742 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8743 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8744 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8745 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8746 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8747 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8748 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8749 pVCpu->iem.s.cActiveMappings++;
8750
8751 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8752 *ppvMem = pbBuf;
8753 return VINF_SUCCESS;
8754}
8755
8756
8757/**
8758 * iemMemMap worker that deals with iemMemPageMap failures.
8759 */
8760IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8761 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8762{
8763 /*
8764 * Filter out conditions we can handle and the ones which shouldn't happen.
8765 */
8766 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8767 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8768 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8769 {
8770 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8771 return rcMap;
8772 }
8773 pVCpu->iem.s.cPotentialExits++;
8774
8775 /*
8776 * Read in the current memory content if it's a read, execute or partial
8777 * write access.
8778 */
8779 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8780 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8781 {
8782 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8783 memset(pbBuf, 0xff, cbMem);
8784 else
8785 {
8786 int rc;
8787 if (!pVCpu->iem.s.fBypassHandlers)
8788 {
8789 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8790 if (rcStrict == VINF_SUCCESS)
8791 { /* nothing */ }
8792 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8793 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8794 else
8795 {
8796 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8797 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8798 return rcStrict;
8799 }
8800 }
8801 else
8802 {
8803 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8804 if (RT_SUCCESS(rc))
8805 { /* likely */ }
8806 else
8807 {
8808 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8809 GCPhysFirst, rc));
8810 return rc;
8811 }
8812 }
8813 }
8814 }
8815#ifdef VBOX_STRICT
8816 else
8817 memset(pbBuf, 0xcc, cbMem);
8820 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8821 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8822#endif
8823
8824 /*
8825 * Commit the bounce buffer entry.
8826 */
8827 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8828 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8829 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8830 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8831 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8832 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8833 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8834 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8835 pVCpu->iem.s.cActiveMappings++;
8836
8837 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8838 *ppvMem = pbBuf;
8839 return VINF_SUCCESS;
8840}
8841
8842
8843
8844/**
8845 * Maps the specified guest memory for the given kind of access.
8846 *
8847 * This may be using bounce buffering of the memory if it's crossing a page
8848 * boundary or if there is an access handler installed for any of it. Because
8849 * of lock prefix guarantees, we're in for some extra clutter when this
8850 * happens.
8851 *
8852 * This may raise a \#GP, \#SS, \#PF or \#AC.
8853 *
8854 * @returns VBox strict status code.
8855 *
8856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8857 * @param ppvMem Where to return the pointer to the mapped
8858 * memory.
8859 * @param cbMem The number of bytes to map. This is usually 1,
8860 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8861 * string operations it can be up to a page.
8862 * @param iSegReg The index of the segment register to use for
8863 * this access. The base and limits are checked.
8864 * Use UINT8_MAX to indicate that no segmentation
8865 * is required (for IDT, GDT and LDT accesses).
8866 * @param GCPtrMem The address of the guest memory.
8867 * @param fAccess How the memory is being accessed. The
8868 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8869 * how to map the memory, while the
8870 * IEM_ACCESS_WHAT_XXX bit is used when raising
8871 * exceptions.
8872 */
8873IEM_STATIC VBOXSTRICTRC
8874iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8875{
8876 /*
8877 * Check the input and figure out which mapping entry to use.
8878 */
8879 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8880 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8881 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8882
8883 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8884 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8885 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8886 {
8887 iMemMap = iemMemMapFindFree(pVCpu);
8888 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8889 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8890 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8891 pVCpu->iem.s.aMemMappings[2].fAccess),
8892 VERR_IEM_IPE_9);
8893 }
8894
8895 /*
8896 * Map the memory, checking that we can actually access it. If something
8897 * slightly complicated happens, fall back on bounce buffering.
8898 */
8899 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8900 if (rcStrict != VINF_SUCCESS)
8901 return rcStrict;
8902
8903 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8904 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8905
8906 RTGCPHYS GCPhysFirst;
8907 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8908 if (rcStrict != VINF_SUCCESS)
8909 return rcStrict;
8910
8911 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8912 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8913 if (fAccess & IEM_ACCESS_TYPE_READ)
8914 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8915
8916 void *pvMem;
8917 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8918 if (rcStrict != VINF_SUCCESS)
8919 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8920
8921 /*
8922 * Fill in the mapping table entry.
8923 */
8924 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8925 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8926 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8927 pVCpu->iem.s.cActiveMappings++;
8928
8929 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8930 *ppvMem = pvMem;
8931
8932 return VINF_SUCCESS;
8933}
8934
8935
8936/**
8937 * Commits the guest memory if bounce buffered and unmaps it.
8938 *
8939 * @returns Strict VBox status code.
8940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8941 * @param pvMem The mapping.
8942 * @param fAccess The kind of access.
8943 */
8944IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8945{
8946 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8947 AssertReturn(iMemMap >= 0, iMemMap);
8948
8949 /* If it's bounce buffered, we may need to write back the buffer. */
8950 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8951 {
8952 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8953 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8954 }
8955 /* Otherwise unlock it. */
8956 else
8957 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8958
8959 /* Free the entry. */
8960 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8961 Assert(pVCpu->iem.s.cActiveMappings != 0);
8962 pVCpu->iem.s.cActiveMappings--;
8963 return VINF_SUCCESS;
8964}
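

/*
 * Usage sketch (not compiled): the typical map / modify / commit pattern the
 * two functions above are built for.  IEM_ACCESS_DATA_RW is assumed to be the
 * read+write counterpart of the IEM_ACCESS_DATA_R/_W flags used elsewhere in
 * this file, and the helper name is made up for the example.
 */
#if 0
static VBOXSTRICTRC iemExampleIncDsByte(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    uint8_t     *pu8;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu8, sizeof(*pu8), X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu8 += 1; /* Bounce buffering (page crossings, access handlers) is transparent at this point. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu8, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif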
8965
8966#ifdef IEM_WITH_SETJMP
8967
8968/**
8969 * Maps the specified guest memory for the given kind of access, longjmp on
8970 * error.
8971 *
8972 * This may be using bounce buffering of the memory if it's crossing a page
8973 * boundary or if there is an access handler installed for any of it. Because
8974 * of lock prefix guarantees, we're in for some extra clutter when this
8975 * happens.
8976 *
8977 * This may raise a \#GP, \#SS, \#PF or \#AC.
8978 *
8979 * @returns Pointer to the mapped memory.
8980 *
8981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8982 * @param cbMem The number of bytes to map. This is usually 1,
8983 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8984 * string operations it can be up to a page.
8985 * @param iSegReg The index of the segment register to use for
8986 * this access. The base and limits are checked.
8987 * Use UINT8_MAX to indicate that no segmentation
8988 * is required (for IDT, GDT and LDT accesses).
8989 * @param GCPtrMem The address of the guest memory.
8990 * @param fAccess How the memory is being accessed. The
8991 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8992 * how to map the memory, while the
8993 * IEM_ACCESS_WHAT_XXX bit is used when raising
8994 * exceptions.
8995 */
8996IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8997{
8998 /*
8999 * Check the input and figure out which mapping entry to use.
9000 */
9001 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
9002 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9003 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9004
9005 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9006 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9007 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9008 {
9009 iMemMap = iemMemMapFindFree(pVCpu);
9010 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9011 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9012 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9013 pVCpu->iem.s.aMemMappings[2].fAccess),
9014 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9015 }
9016
9017 /*
9018 * Map the memory, checking that we can actually access it. If something
9019 * slightly complicated happens, fall back on bounce buffering.
9020 */
9021 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9022 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9023 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9024
9025 /* Crossing a page boundary? */
9026 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9027 { /* No (likely). */ }
9028 else
9029 {
9030 void *pvMem;
9031 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9032 if (rcStrict == VINF_SUCCESS)
9033 return pvMem;
9034 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9035 }
9036
9037 RTGCPHYS GCPhysFirst;
9038 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9039 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9040 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9041
9042 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9043 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9044 if (fAccess & IEM_ACCESS_TYPE_READ)
9045 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9046
9047 void *pvMem;
9048 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9049 if (rcStrict == VINF_SUCCESS)
9050 { /* likely */ }
9051 else
9052 {
9053 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9054 if (rcStrict == VINF_SUCCESS)
9055 return pvMem;
9056 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9057 }
9058
9059 /*
9060 * Fill in the mapping table entry.
9061 */
9062 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9063 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9064 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9065 pVCpu->iem.s.cActiveMappings++;
9066
9067 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9068 return pvMem;
9069}
9070
9071
9072/**
9073 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9074 *
9075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9076 * @param pvMem The mapping.
9077 * @param fAccess The kind of access.
9078 */
9079IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9080{
9081 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9082 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9083
9084 /* If it's bounce buffered, we may need to write back the buffer. */
9085 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9086 {
9087 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9088 {
9089 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9090 if (rcStrict == VINF_SUCCESS)
9091 return;
9092 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9093 }
9094 }
9095 /* Otherwise unlock it. */
9096 else
9097 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9098
9099 /* Free the entry. */
9100 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9101 Assert(pVCpu->iem.s.cActiveMappings != 0);
9102 pVCpu->iem.s.cActiveMappings--;
9103}
9104
9105#endif /* IEM_WITH_SETJMP */
9106
9107#ifndef IN_RING3
9108/**
9109 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9110 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
9111 *
9112 * Allows the instruction to be completed and retired, while the IEM user will
9113 * return to ring-3 immediately afterwards and do the postponed writes there.
9114 *
9115 * @returns VBox status code (no strict statuses). Caller must check
9116 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param pvMem The mapping.
9119 * @param fAccess The kind of access.
9120 */
9121IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9122{
9123 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9124 AssertReturn(iMemMap >= 0, iMemMap);
9125
9126 /* If it's bounce buffered, we may need to write back the buffer. */
9127 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9128 {
9129 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9130 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9131 }
9132 /* Otherwise unlock it. */
9133 else
9134 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9135
9136 /* Free the entry. */
9137 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9138 Assert(pVCpu->iem.s.cActiveMappings != 0);
9139 pVCpu->iem.s.cActiveMappings--;
9140 return VINF_SUCCESS;
9141}
9142#endif
9143
9144
9145/**
9146 * Rolls back mappings, releasing page locks and such.
9147 *
9148 * The caller shall only call this after checking cActiveMappings.
9149 *
9151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9152 */
9153IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9154{
9155 Assert(pVCpu->iem.s.cActiveMappings > 0);
9156
9157 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9158 while (iMemMap-- > 0)
9159 {
9160 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9161 if (fAccess != IEM_ACCESS_INVALID)
9162 {
9163 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9164 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9165 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9166 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9167 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9168 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9169 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9170 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9171 pVCpu->iem.s.cActiveMappings--;
9172 }
9173 }
9174}
9175
9176
9177/**
9178 * Fetches a data byte.
9179 *
9180 * @returns Strict VBox status code.
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param pu8Dst Where to return the byte.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 uint8_t const *pu8Src;
9191 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9192 if (rc == VINF_SUCCESS)
9193 {
9194 *pu8Dst = *pu8Src;
9195 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9196 }
9197 return rc;
9198}
9199
9200
9201#ifdef IEM_WITH_SETJMP
9202/**
9203 * Fetches a data byte, longjmp on error.
9204 *
9205 * @returns The byte.
9206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9207 * @param iSegReg The index of the segment register to use for
9208 * this access. The base and limits are checked.
9209 * @param GCPtrMem The address of the guest memory.
9210 */
9211DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9212{
9213 /* The lazy approach for now... */
9214 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9215 uint8_t const bRet = *pu8Src;
9216 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9217 return bRet;
9218}
9219#endif /* IEM_WITH_SETJMP */
9220
9221
9222/**
9223 * Fetches a data word.
9224 *
9225 * @returns Strict VBox status code.
9226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9227 * @param pu16Dst Where to return the word.
9228 * @param iSegReg The index of the segment register to use for
9229 * this access. The base and limits are checked.
9230 * @param GCPtrMem The address of the guest memory.
9231 */
9232IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9233{
9234 /* The lazy approach for now... */
9235 uint16_t const *pu16Src;
9236 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9237 if (rc == VINF_SUCCESS)
9238 {
9239 *pu16Dst = *pu16Src;
9240 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9241 }
9242 return rc;
9243}
9244
9245
9246#ifdef IEM_WITH_SETJMP
9247/**
9248 * Fetches a data word, longjmp on error.
9249 *
9250 * @returns The word.
9251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9252 * @param iSegReg The index of the segment register to use for
9253 * this access. The base and limits are checked.
9254 * @param GCPtrMem The address of the guest memory.
9255 */
9256DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9257{
9258 /* The lazy approach for now... */
9259 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9260 uint16_t const u16Ret = *pu16Src;
9261 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9262 return u16Ret;
9263}
9264#endif
9265
9266
9267/**
9268 * Fetches a data dword.
9269 *
9270 * @returns Strict VBox status code.
9271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9272 * @param pu32Dst Where to return the dword.
9273 * @param iSegReg The index of the segment register to use for
9274 * this access. The base and limits are checked.
9275 * @param GCPtrMem The address of the guest memory.
9276 */
9277IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9278{
9279 /* The lazy approach for now... */
9280 uint32_t const *pu32Src;
9281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9282 if (rc == VINF_SUCCESS)
9283 {
9284 *pu32Dst = *pu32Src;
9285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9286 }
9287 return rc;
9288}
9289
9290
9291#ifdef IEM_WITH_SETJMP
9292
9293IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9294{
9295 Assert(cbMem >= 1);
9296 Assert(iSegReg < X86_SREG_COUNT);
9297
9298 /*
9299 * 64-bit mode is simpler.
9300 */
9301 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9302 {
9303 if (iSegReg >= X86_SREG_FS)
9304 {
9305 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9306 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9307 GCPtrMem += pSel->u64Base;
9308 }
9309
9310 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9311 return GCPtrMem;
9312 }
9313 /*
9314 * 16-bit and 32-bit segmentation.
9315 */
9316 else
9317 {
9318 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9319 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9320 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9321 == X86DESCATTR_P /* data, expand up */
9322 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9323 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9324 {
9325 /* expand up */
9326 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9327 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9328 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9329 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9330 }
9331 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9332 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9333 {
9334 /* expand down */
9335 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9336 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9337 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9338 && GCPtrLast32 > (uint32_t)GCPtrMem))
9339 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9340 }
9341 else
9342 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9343 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9344 }
9345 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9346}
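/*
 * Worked example for the segmented checks above (comment only): take a 32-bit
 * expand-down data segment with limit 0x0fff and D=1, so the valid offsets
 * are 0x1000 thru 0xffffffff.  A 4 byte read at offset 0x1000 passes (first
 * byte above the limit, no 32-bit wrap) and returns 0x1000 plus the 32-bit
 * segment base, while the same read at offset 0x0ffe falls through to
 * iemRaiseSelectorBoundsJmp() because the first byte lies at or below the
 * limit.
 */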
9347
9348
9349IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9350{
9351 Assert(cbMem >= 1);
9352 Assert(iSegReg < X86_SREG_COUNT);
9353
9354 /*
9355 * 64-bit mode is simpler.
9356 */
9357 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9358 {
9359 if (iSegReg >= X86_SREG_FS)
9360 {
9361 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9362 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9363 GCPtrMem += pSel->u64Base;
9364 }
9365
9366 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9367 return GCPtrMem;
9368 }
9369 /*
9370 * 16-bit and 32-bit segmentation.
9371 */
9372 else
9373 {
9374 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9375 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9376 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9377 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9378 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9379 {
9380 /* expand up */
9381 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9382 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9383 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9384 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9385 }
9386 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9387 {
9388 /* expand down */
9389 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9390 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9391 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9392 && GCPtrLast32 > (uint32_t)GCPtrMem))
9393 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9394 }
9395 else
9396 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9397 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9398 }
9399 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9400}
9401
9402
9403/**
9404 * Fetches a data dword, longjmp on error, fallback/safe version.
9405 *
9406 * @returns The dword.
9407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9408 * @param iSegReg The index of the segment register to use for
9409 * this access. The base and limits are checked.
9410 * @param GCPtrMem The address of the guest memory.
9411 */
9412IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9413{
9414 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9415 uint32_t const u32Ret = *pu32Src;
9416 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9417 return u32Ret;
9418}
9419
9420
9421/**
9422 * Fetches a data dword, longjmp on error.
9423 *
9424 * @returns The dword.
9425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432# ifdef IEM_WITH_DATA_TLB
9433 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9434 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9435 {
9436 /// @todo more later.
9437 }
9438
9439 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9440# else
9441 /* The lazy approach. */
9442 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9443 uint32_t const u32Ret = *pu32Src;
9444 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9445 return u32Ret;
9446# endif
9447}
9448#endif
9449
9450
9451#ifdef SOME_UNUSED_FUNCTION
9452/**
9453 * Fetches a data dword and sign extends it to a qword.
9454 *
9455 * @returns Strict VBox status code.
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param pu64Dst Where to return the sign extended value.
9458 * @param iSegReg The index of the segment register to use for
9459 * this access. The base and limits are checked.
9460 * @param GCPtrMem The address of the guest memory.
9461 */
9462IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9463{
9464 /* The lazy approach for now... */
9465 int32_t const *pi32Src;
9466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9467 if (rc == VINF_SUCCESS)
9468 {
9469 *pu64Dst = *pi32Src;
9470 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9471 }
9472#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9473 else
9474 *pu64Dst = 0;
9475#endif
9476 return rc;
9477}
9478#endif
9479
9480
9481/**
9482 * Fetches a data qword.
9483 *
9484 * @returns Strict VBox status code.
9485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9486 * @param pu64Dst Where to return the qword.
9487 * @param iSegReg The index of the segment register to use for
9488 * this access. The base and limits are checked.
9489 * @param GCPtrMem The address of the guest memory.
9490 */
9491IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9492{
9493 /* The lazy approach for now... */
9494 uint64_t const *pu64Src;
9495 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9496 if (rc == VINF_SUCCESS)
9497 {
9498 *pu64Dst = *pu64Src;
9499 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9500 }
9501 return rc;
9502}
9503
9504
9505#ifdef IEM_WITH_SETJMP
9506/**
9507 * Fetches a data qword, longjmp on error.
9508 *
9509 * @returns The qword.
9510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9511 * @param iSegReg The index of the segment register to use for
9512 * this access. The base and limits are checked.
9513 * @param GCPtrMem The address of the guest memory.
9514 */
9515DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9516{
9517 /* The lazy approach for now... */
9518 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9519 uint64_t const u64Ret = *pu64Src;
9520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9521 return u64Ret;
9522}
9523#endif
9524
9525
9526/**
9527 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9528 *
9529 * @returns Strict VBox status code.
9530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9531 * @param pu64Dst Where to return the qword.
9532 * @param iSegReg The index of the segment register to use for
9533 * this access. The base and limits are checked.
9534 * @param GCPtrMem The address of the guest memory.
9535 */
9536IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9537{
9538 /* The lazy approach for now... */
9539 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9540 if (RT_UNLIKELY(GCPtrMem & 15))
9541 return iemRaiseGeneralProtectionFault0(pVCpu);
9542
9543 uint64_t const *pu64Src;
9544 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9545 if (rc == VINF_SUCCESS)
9546 {
9547 *pu64Dst = *pu64Src;
9548 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9549 }
9550 return rc;
9551}
9552
9553
9554#ifdef IEM_WITH_SETJMP
9555/**
9556 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9557 *
9558 * @returns The qword.
9559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9560 * @param iSegReg The index of the segment register to use for
9561 * this access. The base and limits are checked.
9562 * @param GCPtrMem The address of the guest memory.
9563 */
9564DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9565{
9566 /* The lazy approach for now... */
9567 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9568 if (RT_LIKELY(!(GCPtrMem & 15)))
9569 {
9570 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9571 uint64_t const u64Ret = *pu64Src;
9572 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9573 return u64Ret;
9574 }
9575
9576 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9577 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9578}
9579#endif
9580
9581
9582/**
9583 * Fetches a data tword.
9584 *
9585 * @returns Strict VBox status code.
9586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9587 * @param pr80Dst Where to return the tword.
9588 * @param iSegReg The index of the segment register to use for
9589 * this access. The base and limits are checked.
9590 * @param GCPtrMem The address of the guest memory.
9591 */
9592IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9593{
9594 /* The lazy approach for now... */
9595 PCRTFLOAT80U pr80Src;
9596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9597 if (rc == VINF_SUCCESS)
9598 {
9599 *pr80Dst = *pr80Src;
9600 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9601 }
9602 return rc;
9603}
9604
9605
9606#ifdef IEM_WITH_SETJMP
9607/**
9608 * Fetches a data tword, longjmp on error.
9609 *
9610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9611 * @param pr80Dst Where to return the tword.
9612 * @param iSegReg The index of the segment register to use for
9613 * this access. The base and limits are checked.
9614 * @param GCPtrMem The address of the guest memory.
9615 */
9616DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9617{
9618 /* The lazy approach for now... */
9619 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9620 *pr80Dst = *pr80Src;
9621 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9622}
9623#endif
9624
9625
9626/**
9627 * Fetches a data dqword (double qword), generally SSE related.
9628 *
9629 * @returns Strict VBox status code.
9630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9631 * @param pu128Dst Where to return the dqword.
9632 * @param iSegReg The index of the segment register to use for
9633 * this access. The base and limits are checked.
9634 * @param GCPtrMem The address of the guest memory.
9635 */
9636IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9637{
9638 /* The lazy approach for now... */
9639 PCRTUINT128U pu128Src;
9640 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9641 if (rc == VINF_SUCCESS)
9642 {
9643 pu128Dst->au64[0] = pu128Src->au64[0];
9644 pu128Dst->au64[1] = pu128Src->au64[1];
9645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9646 }
9647 return rc;
9648}
9649
9650
9651#ifdef IEM_WITH_SETJMP
9652/**
9653 * Fetches a data dqword (double qword), generally SSE related.
9654 *
9655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9656 * @param pu128Dst Where to return the dqword.
9657 * @param iSegReg The index of the segment register to use for
9658 * this access. The base and limits are checked.
9659 * @param GCPtrMem The address of the guest memory.
9660 */
9661IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9662{
9663 /* The lazy approach for now... */
9664 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9665 pu128Dst->au64[0] = pu128Src->au64[0];
9666 pu128Dst->au64[1] = pu128Src->au64[1];
9667 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9668}
9669#endif
9670
9671
9672/**
9673 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9674 * related.
9675 *
9676 * Raises \#GP(0) if not aligned.
9677 *
9678 * @returns Strict VBox status code.
9679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9680 * @param pu128Dst Where to return the dqword.
9681 * @param iSegReg The index of the segment register to use for
9682 * this access. The base and limits are checked.
9683 * @param GCPtrMem The address of the guest memory.
9684 */
9685IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9686{
9687 /* The lazy approach for now... */
9688 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9689 if ( (GCPtrMem & 15)
9690 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9691 return iemRaiseGeneralProtectionFault0(pVCpu);
9692
9693 PCRTUINT128U pu128Src;
9694 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9695 if (rc == VINF_SUCCESS)
9696 {
9697 pu128Dst->au64[0] = pu128Src->au64[0];
9698 pu128Dst->au64[1] = pu128Src->au64[1];
9699 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9700 }
9701 return rc;
9702}
9703
9704
9705#ifdef IEM_WITH_SETJMP
9706/**
9707 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9708 * related, longjmp on error.
9709 *
9710 * Raises \#GP(0) if not aligned.
9711 *
9712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9713 * @param pu128Dst Where to return the dqword.
9714 * @param iSegReg The index of the segment register to use for
9715 * this access. The base and limits are checked.
9716 * @param GCPtrMem The address of the guest memory.
9717 */
9718DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9719{
9720 /* The lazy approach for now... */
9721 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9722 if ( (GCPtrMem & 15) == 0
9723 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9724 {
9725 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9726 pu128Dst->au64[0] = pu128Src->au64[0];
9727 pu128Dst->au64[1] = pu128Src->au64[1];
9728 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9729 return;
9730 }
9731
9732 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9733 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9734}
9735#endif
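/*
 * Note on the X86_MXCSR_MM test above (comment only): that bit is AMD's
 * misaligned SSE exception mask.  As a worked example, a 16 byte fetch from
 * an address whose low nibble is 8 raises #GP(0) while MXCSR.MM is clear, but
 * is handed straight to iemMemMap()/iemMemMapJmp() once the guest sets
 * MXCSR.MM.
 */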
9736
9737
9738/**
9739 * Fetches a data oword (octo word), generally AVX related.
9740 *
9741 * @returns Strict VBox status code.
9742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9743 * @param pu256Dst Where to return the oword.
9744 * @param iSegReg The index of the segment register to use for
9745 * this access. The base and limits are checked.
9746 * @param GCPtrMem The address of the guest memory.
9747 */
9748IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9749{
9750 /* The lazy approach for now... */
9751 PCRTUINT256U pu256Src;
9752 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9753 if (rc == VINF_SUCCESS)
9754 {
9755 pu256Dst->au64[0] = pu256Src->au64[0];
9756 pu256Dst->au64[1] = pu256Src->au64[1];
9757 pu256Dst->au64[2] = pu256Src->au64[2];
9758 pu256Dst->au64[3] = pu256Src->au64[3];
9759 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9760 }
9761 return rc;
9762}
9763
9764
9765#ifdef IEM_WITH_SETJMP
9766/**
9767 * Fetches a data oword (octo word), generally AVX related.
9768 *
9769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9770 * @param pu256Dst Where to return the oword.
9771 * @param iSegReg The index of the segment register to use for
9772 * this access. The base and limits are checked.
9773 * @param GCPtrMem The address of the guest memory.
9774 */
9775IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9776{
9777 /* The lazy approach for now... */
9778 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9779 pu256Dst->au64[0] = pu256Src->au64[0];
9780 pu256Dst->au64[1] = pu256Src->au64[1];
9781 pu256Dst->au64[2] = pu256Src->au64[2];
9782 pu256Dst->au64[3] = pu256Src->au64[3];
9783 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9784}
9785#endif
9786
9787
9788/**
9789 * Fetches a data oword (octo word) at an aligned address, generally AVX
9790 * related.
9791 *
9792 * Raises \#GP(0) if not aligned.
9793 *
9794 * @returns Strict VBox status code.
9795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9796 * @param pu256Dst Where to return the oword.
9797 * @param iSegReg The index of the segment register to use for
9798 * this access. The base and limits are checked.
9799 * @param GCPtrMem The address of the guest memory.
9800 */
9801IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9802{
9803 /* The lazy approach for now... */
9804 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9805 if (GCPtrMem & 31)
9806 return iemRaiseGeneralProtectionFault0(pVCpu);
9807
9808 PCRTUINT256U pu256Src;
9809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9810 if (rc == VINF_SUCCESS)
9811 {
9812 pu256Dst->au64[0] = pu256Src->au64[0];
9813 pu256Dst->au64[1] = pu256Src->au64[1];
9814 pu256Dst->au64[2] = pu256Src->au64[2];
9815 pu256Dst->au64[3] = pu256Src->au64[3];
9816 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9817 }
9818 return rc;
9819}
9820
9821
9822#ifdef IEM_WITH_SETJMP
9823/**
9824 * Fetches a data oword (octo word) at an aligned address, generally AVX
9825 * related, longjmp on error.
9826 *
9827 * Raises \#GP(0) if not aligned.
9828 *
9829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9830 * @param pu256Dst Where to return the oword.
9831 * @param iSegReg The index of the segment register to use for
9832 * this access. The base and limits are checked.
9833 * @param GCPtrMem The address of the guest memory.
9834 */
9835DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9836{
9837 /* The lazy approach for now... */
9838 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9839 if ((GCPtrMem & 31) == 0)
9840 {
9841 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9842 pu256Dst->au64[0] = pu256Src->au64[0];
9843 pu256Dst->au64[1] = pu256Src->au64[1];
9844 pu256Dst->au64[2] = pu256Src->au64[2];
9845 pu256Dst->au64[3] = pu256Src->au64[3];
9846 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9847 return;
9848 }
9849
9850 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9851 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9852}
9853#endif
9854
9855
9856
9857/**
9858 * Fetches a descriptor register (lgdt, lidt).
9859 *
9860 * @returns Strict VBox status code.
9861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9862 * @param pcbLimit Where to return the limit.
9863 * @param pGCPtrBase Where to return the base.
9864 * @param iSegReg The index of the segment register to use for
9865 * this access. The base and limits are checked.
9866 * @param GCPtrMem The address of the guest memory.
9867 * @param enmOpSize The effective operand size.
9868 */
9869IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9870 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9871{
9872 /*
9873 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9874 * little special:
9875 * - The two reads are done separately.
9876 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9877 * - We suspect the 386 to actually commit the limit before the base in
9878 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9879 * don't try to emulate this eccentric behavior, because it's not well
9880 * enough understood and rather hard to trigger.
9881 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9882 */
9883 VBOXSTRICTRC rcStrict;
9884 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9885 {
9886 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9887 if (rcStrict == VINF_SUCCESS)
9888 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9889 }
9890 else
9891 {
9892 uint32_t uTmp = 0; /* (silences a Visual C++ potentially-uninitialized warning) */
9893 if (enmOpSize == IEMMODE_32BIT)
9894 {
9895 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9896 {
9897 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9898 if (rcStrict == VINF_SUCCESS)
9899 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9900 }
9901 else
9902 {
9903 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9904 if (rcStrict == VINF_SUCCESS)
9905 {
9906 *pcbLimit = (uint16_t)uTmp;
9907 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9908 }
9909 }
9910 if (rcStrict == VINF_SUCCESS)
9911 *pGCPtrBase = uTmp;
9912 }
9913 else
9914 {
9915 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9916 if (rcStrict == VINF_SUCCESS)
9917 {
9918 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9919 if (rcStrict == VINF_SUCCESS)
9920 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9921 }
9922 }
9923 }
9924 return rcStrict;
9925}
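/*
 * Worked example for the 16-bit operand size path above (comment only): if
 * the six source bytes are 0xff 0x3f 0x00 0x10 0x20 0xab, the limit comes out
 * as 0x3fff and the base as 0x00201000 - the topmost base byte (0xab) is
 * stripped by the UINT32_C(0x00ffffff) mask, matching the 24-bit base of the
 * 16-bit descriptor-table format.
 */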
9926
9927
9928
9929/**
9930 * Stores a data byte.
9931 *
9932 * @returns Strict VBox status code.
9933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9934 * @param iSegReg The index of the segment register to use for
9935 * this access. The base and limits are checked.
9936 * @param GCPtrMem The address of the guest memory.
9937 * @param u8Value The value to store.
9938 */
9939IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9940{
9941 /* The lazy approach for now... */
9942 uint8_t *pu8Dst;
9943 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9944 if (rc == VINF_SUCCESS)
9945 {
9946 *pu8Dst = u8Value;
9947 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9948 }
9949 return rc;
9950}
9951
9952
9953#ifdef IEM_WITH_SETJMP
9954/**
9955 * Stores a data byte, longjmp on error.
9956 *
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param iSegReg The index of the segment register to use for
9959 * this access. The base and limits are checked.
9960 * @param GCPtrMem The address of the guest memory.
9961 * @param u8Value The value to store.
9962 */
9963IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9964{
9965 /* The lazy approach for now... */
9966 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 *pu8Dst = u8Value;
9968 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9969}
9970#endif
9971
9972
9973/**
9974 * Stores a data word.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param iSegReg The index of the segment register to use for
9979 * this access. The base and limits are checked.
9980 * @param GCPtrMem The address of the guest memory.
9981 * @param u16Value The value to store.
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9984{
9985 /* The lazy approach for now... */
9986 uint16_t *pu16Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 *pu16Dst = u16Value;
9991 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9992 }
9993 return rc;
9994}
9995
9996
9997#ifdef IEM_WITH_SETJMP
9998/**
9999 * Stores a data word, longjmp on error.
10000 *
10001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10002 * @param iSegReg The index of the segment register to use for
10003 * this access. The base and limits are checked.
10004 * @param GCPtrMem The address of the guest memory.
10005 * @param u16Value The value to store.
10006 */
10007IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10008{
10009 /* The lazy approach for now... */
10010 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10011 *pu16Dst = u16Value;
10012 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10013}
10014#endif
10015
10016
10017/**
10018 * Stores a data dword.
10019 *
10020 * @returns Strict VBox status code.
10021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10022 * @param iSegReg The index of the segment register to use for
10023 * this access. The base and limits are checked.
10024 * @param GCPtrMem The address of the guest memory.
10025 * @param u32Value The value to store.
10026 */
10027IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10028{
10029 /* The lazy approach for now... */
10030 uint32_t *pu32Dst;
10031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10032 if (rc == VINF_SUCCESS)
10033 {
10034 *pu32Dst = u32Value;
10035 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10036 }
10037 return rc;
10038}
10039
10040
10041#ifdef IEM_WITH_SETJMP
10042/**
10043 * Stores a data dword, longjmp on error.
10044 *
10046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10047 * @param iSegReg The index of the segment register to use for
10048 * this access. The base and limits are checked.
10049 * @param GCPtrMem The address of the guest memory.
10050 * @param u32Value The value to store.
10051 */
10052IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10053{
10054 /* The lazy approach for now... */
10055 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10056 *pu32Dst = u32Value;
10057 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10058}
10059#endif
10060
10061
10062/**
10063 * Stores a data qword.
10064 *
10065 * @returns Strict VBox status code.
10066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10067 * @param iSegReg The index of the segment register to use for
10068 * this access. The base and limits are checked.
10069 * @param GCPtrMem The address of the guest memory.
10070 * @param u64Value The value to store.
10071 */
10072IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10073{
10074 /* The lazy approach for now... */
10075 uint64_t *pu64Dst;
10076 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10077 if (rc == VINF_SUCCESS)
10078 {
10079 *pu64Dst = u64Value;
10080 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10081 }
10082 return rc;
10083}
10084
10085
10086#ifdef IEM_WITH_SETJMP
10087/**
10088 * Stores a data qword, longjmp on error.
10089 *
10090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10091 * @param iSegReg The index of the segment register to use for
10092 * this access. The base and limits are checked.
10093 * @param GCPtrMem The address of the guest memory.
10094 * @param u64Value The value to store.
10095 */
10096IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10097{
10098 /* The lazy approach for now... */
10099 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10100 *pu64Dst = u64Value;
10101 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10102}
10103#endif
10104
10105
10106/**
10107 * Stores a data dqword.
10108 *
10109 * @returns Strict VBox status code.
10110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10111 * @param iSegReg The index of the segment register to use for
10112 * this access. The base and limits are checked.
10113 * @param GCPtrMem The address of the guest memory.
10114 * @param u128Value The value to store.
10115 */
10116IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10117{
10118 /* The lazy approach for now... */
10119 PRTUINT128U pu128Dst;
10120 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10121 if (rc == VINF_SUCCESS)
10122 {
10123 pu128Dst->au64[0] = u128Value.au64[0];
10124 pu128Dst->au64[1] = u128Value.au64[1];
10125 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10126 }
10127 return rc;
10128}
10129
10130
10131#ifdef IEM_WITH_SETJMP
10132/**
10133 * Stores a data dqword, longjmp on error.
10134 *
10135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10136 * @param iSegReg The index of the segment register to use for
10137 * this access. The base and limits are checked.
10138 * @param GCPtrMem The address of the guest memory.
10139 * @param u128Value The value to store.
10140 */
10141IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10142{
10143 /* The lazy approach for now... */
10144 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10145 pu128Dst->au64[0] = u128Value.au64[0];
10146 pu128Dst->au64[1] = u128Value.au64[1];
10147 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10148}
10149#endif
10150
10151
10152/**
10153 * Stores a data dqword, SSE aligned.
10154 *
10155 * @returns Strict VBox status code.
10156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10157 * @param iSegReg The index of the segment register to use for
10158 * this access. The base and limits are checked.
10159 * @param GCPtrMem The address of the guest memory.
10160 * @param u128Value The value to store.
10161 */
10162IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10163{
10164 /* The lazy approach for now... */
10165 if ( (GCPtrMem & 15)
10166 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10167 return iemRaiseGeneralProtectionFault0(pVCpu);
10168
10169 PRTUINT128U pu128Dst;
10170 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10171 if (rc == VINF_SUCCESS)
10172 {
10173 pu128Dst->au64[0] = u128Value.au64[0];
10174 pu128Dst->au64[1] = u128Value.au64[1];
10175 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10176 }
10177 return rc;
10178}
10179
10180
10181#ifdef IEM_WITH_SETJMP
10182/**
10183 * Stores a data dqword, SSE aligned, longjmp on error.
10184 *
10186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10187 * @param iSegReg The index of the segment register to use for
10188 * this access. The base and limits are checked.
10189 * @param GCPtrMem The address of the guest memory.
10190 * @param u128Value The value to store.
10191 */
10192DECL_NO_INLINE(IEM_STATIC, void)
10193iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10194{
10195 /* The lazy approach for now... */
10196 if ( (GCPtrMem & 15) == 0
10197 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10198 {
10199 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10200 pu128Dst->au64[0] = u128Value.au64[0];
10201 pu128Dst->au64[1] = u128Value.au64[1];
10202 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10203 return;
10204 }
10205
10206 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10207 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10208}
10209#endif
10210
10211
10212/**
10213 * Stores a data oword (octo word).
10214 *
10215 * @returns Strict VBox status code.
10216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10217 * @param iSegReg The index of the segment register to use for
10218 * this access. The base and limits are checked.
10219 * @param GCPtrMem The address of the guest memory.
10220 * @param pu256Value Pointer to the value to store.
10221 */
10222IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10223{
10224 /* The lazy approach for now... */
10225 PRTUINT256U pu256Dst;
10226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10227 if (rc == VINF_SUCCESS)
10228 {
10229 pu256Dst->au64[0] = pu256Value->au64[0];
10230 pu256Dst->au64[1] = pu256Value->au64[1];
10231 pu256Dst->au64[2] = pu256Value->au64[2];
10232 pu256Dst->au64[3] = pu256Value->au64[3];
10233 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10234 }
10235 return rc;
10236}
10237
10238
10239#ifdef IEM_WITH_SETJMP
10240/**
10241 * Stores a data oword (octo word), longjmp on error.
10242 *
10243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10244 * @param iSegReg The index of the segment register to use for
10245 * this access. The base and limits are checked.
10246 * @param GCPtrMem The address of the guest memory.
10247 * @param pu256Value Pointer to the value to store.
10248 */
10249IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10250{
10251 /* The lazy approach for now... */
10252 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10253 pu256Dst->au64[0] = pu256Value->au64[0];
10254 pu256Dst->au64[1] = pu256Value->au64[1];
10255 pu256Dst->au64[2] = pu256Value->au64[2];
10256 pu256Dst->au64[3] = pu256Value->au64[3];
10257 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10258}
10259#endif
10260
10261
10262/**
10263 * Stores a data oword (octo word), AVX aligned.
10264 *
10265 * @returns Strict VBox status code.
10266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10267 * @param iSegReg The index of the segment register to use for
10268 * this access. The base and limits are checked.
10269 * @param GCPtrMem The address of the guest memory.
10270 * @param pu256Value Pointer to the value to store.
10271 */
10272IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10273{
10274 /* The lazy approach for now... */
10275 if (GCPtrMem & 31)
10276 return iemRaiseGeneralProtectionFault0(pVCpu);
10277
10278 PRTUINT256U pu256Dst;
10279 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10280 if (rc == VINF_SUCCESS)
10281 {
10282 pu256Dst->au64[0] = pu256Value->au64[0];
10283 pu256Dst->au64[1] = pu256Value->au64[1];
10284 pu256Dst->au64[2] = pu256Value->au64[2];
10285 pu256Dst->au64[3] = pu256Value->au64[3];
10286 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10287 }
10288 return rc;
10289}
10290
10291
10292#ifdef IEM_WITH_SETJMP
10293/**
10294 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10295 *
10297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10298 * @param iSegReg The index of the segment register to use for
10299 * this access. The base and limits are checked.
10300 * @param GCPtrMem The address of the guest memory.
10301 * @param pu256Value Pointer to the value to store.
10302 */
10303DECL_NO_INLINE(IEM_STATIC, void)
10304iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10305{
10306 /* The lazy approach for now... */
10307 if ((GCPtrMem & 31) == 0)
10308 {
10309 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10310 pu256Dst->au64[0] = pu256Value->au64[0];
10311 pu256Dst->au64[1] = pu256Value->au64[1];
10312 pu256Dst->au64[2] = pu256Value->au64[2];
10313 pu256Dst->au64[3] = pu256Value->au64[3];
10314 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10315 return;
10316 }
10317
10318 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10319 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10320}
10321#endif
10322
10323
10324/**
10325 * Stores a descriptor register (sgdt, sidt).
10326 *
10327 * @returns Strict VBox status code.
10328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10329 * @param cbLimit The limit.
10330 * @param GCPtrBase The base address.
10331 * @param iSegReg The index of the segment register to use for
10332 * this access. The base and limits are checked.
10333 * @param GCPtrMem The address of the guest memory.
10334 */
10335IEM_STATIC VBOXSTRICTRC
10336iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10337{
10338 /*
10339 * The SIDT and SGDT instructions actually store the data using two
10340 * independent writes. The instructions do not respond to operand-size prefixes.
10341 */
10342 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10343 if (rcStrict == VINF_SUCCESS)
10344 {
10345 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10346 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10347 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10348 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10349 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10350 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10351 else
10352 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10353 }
10354 return rcStrict;
10355}
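/*
 * Worked example for the 16-bit store path above (comment only): with a base
 * of 0x00012345, a 286-level target CPU stores the dword 0xff012345 at offset
 * 2 (the undefined top byte is forced to 0xff), whereas 386 and later targets
 * store 0x00012345 unchanged.
 */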
10356
10357
10358/**
10359 * Pushes a word onto the stack.
10360 *
10361 * @returns Strict VBox status code.
10362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10363 * @param u16Value The value to push.
10364 */
10365IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10366{
10367 /* Decrement the stack pointer. */
10368 uint64_t uNewRsp;
10369 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10370
10371 /* Write the word the lazy way. */
10372 uint16_t *pu16Dst;
10373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10374 if (rc == VINF_SUCCESS)
10375 {
10376 *pu16Dst = u16Value;
10377 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10378 }
10379
10380 /* Commit the new RSP value unless an access handler made trouble. */
10381 if (rc == VINF_SUCCESS)
10382 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10383
10384 return rc;
10385}
10386
10387
10388/**
10389 * Pushes a dword onto the stack.
10390 *
10391 * @returns Strict VBox status code.
10392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10393 * @param u32Value The value to push.
10394 */
10395IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10396{
10397 /* Decrement the stack pointer. */
10398 uint64_t uNewRsp;
10399 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10400
10401 /* Write the dword the lazy way. */
10402 uint32_t *pu32Dst;
10403 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10404 if (rc == VINF_SUCCESS)
10405 {
10406 *pu32Dst = u32Value;
10407 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10408 }
10409
10410 /* Commit the new RSP value unless an access handler made trouble. */
10411 if (rc == VINF_SUCCESS)
10412 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10413
10414 return rc;
10415}
10416
10417
10418/**
10419 * Pushes a dword segment register value onto the stack.
10420 *
10421 * @returns Strict VBox status code.
10422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10423 * @param u32Value The value to push.
10424 */
10425IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10426{
10427 /* Decrement the stack pointer. */
10428 uint64_t uNewRsp;
10429 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10430
10431 /* The Intel docs talk about zero extending the selector register
10432 value. My actual Intel CPU here might be zero extending the value,
10433 but it still only writes the lower word... */
10434 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10435 * happens when crossing an electric page boundary, is the high word checked
10436 * for write accessibility or not? Probably it is. What about segment limits?
10437 * It appears this behavior is also shared with trap error codes.
10438 *
10439 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10440 * ancient hardware when it actually did change. */
10441 uint16_t *pu16Dst;
10442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10443 if (rc == VINF_SUCCESS)
10444 {
10445 *pu16Dst = (uint16_t)u32Value;
10446 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10447 }
10448
10449 /* Commit the new RSP value unless an access handler made trouble. */
10450 if (rc == VINF_SUCCESS)
10451 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10452
10453 return rc;
10454}
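/*
 * Illustrative example for the selector push above (comment only): in 32-bit
 * code a PUSH DS with ESP=0x1000 and DS=0x0023 moves ESP to 0x0ffc, yet only
 * the word at 0x0ffc is written (0x0023); the bytes at 0x0ffe..0x0fff are
 * mapped read/write but left untouched, which is what the sizeof(uint32_t)
 * RW mapping above expresses.
 */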
10455
10456
10457/**
10458 * Pushes a qword onto the stack.
10459 *
10460 * @returns Strict VBox status code.
10461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10462 * @param u64Value The value to push.
10463 */
10464IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10465{
10466 /* Decrement the stack pointer. */
10467 uint64_t uNewRsp;
10468 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10469
10470 /* Write the qword the lazy way. */
10471 uint64_t *pu64Dst;
10472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10473 if (rc == VINF_SUCCESS)
10474 {
10475 *pu64Dst = u64Value;
10476 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10477 }
10478
10479 /* Commit the new RSP value unless an access handler made trouble. */
10480 if (rc == VINF_SUCCESS)
10481 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10482
10483 return rc;
10484}
10485
10486
10487/**
10488 * Pops a word from the stack.
10489 *
10490 * @returns Strict VBox status code.
10491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10492 * @param pu16Value Where to store the popped value.
10493 */
10494IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10495{
10496 /* Increment the stack pointer. */
10497 uint64_t uNewRsp;
10498 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10499
10500 /* Read the word the lazy way. */
10501 uint16_t const *pu16Src;
10502 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10503 if (rc == VINF_SUCCESS)
10504 {
10505 *pu16Value = *pu16Src;
10506 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10507
10508 /* Commit the new RSP value. */
10509 if (rc == VINF_SUCCESS)
10510 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10511 }
10512
10513 return rc;
10514}
10515
10516
10517/**
10518 * Pops a dword from the stack.
10519 *
10520 * @returns Strict VBox status code.
10521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10522 * @param pu32Value Where to store the popped value.
10523 */
10524IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10525{
10526 /* Increment the stack pointer. */
10527 uint64_t uNewRsp;
10528 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10529
10530 /* Read the dword the lazy way. */
10531 uint32_t const *pu32Src;
10532 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10533 if (rc == VINF_SUCCESS)
10534 {
10535 *pu32Value = *pu32Src;
10536 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10537
10538 /* Commit the new RSP value. */
10539 if (rc == VINF_SUCCESS)
10540 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10541 }
10542
10543 return rc;
10544}
10545
10546
10547/**
10548 * Pops a qword from the stack.
10549 *
10550 * @returns Strict VBox status code.
10551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10552 * @param pu64Value Where to store the popped value.
10553 */
10554IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10555{
10556 /* Increment the stack pointer. */
10557 uint64_t uNewRsp;
10558 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10559
10560 /* Read the qword the lazy way. */
10561 uint64_t const *pu64Src;
10562 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10563 if (rc == VINF_SUCCESS)
10564 {
10565 *pu64Value = *pu64Src;
10566 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10567
10568 /* Commit the new RSP value. */
10569 if (rc == VINF_SUCCESS)
10570 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10571 }
10572
10573 return rc;
10574}
10575
10576
10577/**
10578 * Pushes a word onto the stack, using a temporary stack pointer.
10579 *
10580 * @returns Strict VBox status code.
10581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10582 * @param u16Value The value to push.
10583 * @param pTmpRsp Pointer to the temporary stack pointer.
10584 */
10585IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10586{
10587 /* Decrement the stack pointer. */
10588 RTUINT64U NewRsp = *pTmpRsp;
10589 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10590
10591 /* Write the word the lazy way. */
10592 uint16_t *pu16Dst;
10593 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10594 if (rc == VINF_SUCCESS)
10595 {
10596 *pu16Dst = u16Value;
10597 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10598 }
10599
10600 /* Commit the new RSP value unless an access handler made trouble. */
10601 if (rc == VINF_SUCCESS)
10602 *pTmpRsp = NewRsp;
10603
10604 return rc;
10605}
10606
10607
10608/**
10609 * Pushes a dword onto the stack, using a temporary stack pointer.
10610 *
10611 * @returns Strict VBox status code.
10612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10613 * @param u32Value The value to push.
10614 * @param pTmpRsp Pointer to the temporary stack pointer.
10615 */
10616IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10617{
10618 /* Decrement the stack pointer. */
10619 RTUINT64U NewRsp = *pTmpRsp;
10620 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10621
10622 /* Write the dword the lazy way. */
10623 uint32_t *pu32Dst;
10624 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10625 if (rc == VINF_SUCCESS)
10626 {
10627 *pu32Dst = u32Value;
10628 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10629 }
10630
10631 /* Commit the new RSP value unless an access handler made trouble. */
10632 if (rc == VINF_SUCCESS)
10633 *pTmpRsp = NewRsp;
10634
10635 return rc;
10636}
10637
10638
10639/**
10640 * Pushes a qword onto the stack, using a temporary stack pointer.
10641 *
10642 * @returns Strict VBox status code.
10643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10644 * @param u64Value The value to push.
10645 * @param pTmpRsp Pointer to the temporary stack pointer.
10646 */
10647IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10648{
10649 /* Decrement the stack pointer. */
10650 RTUINT64U NewRsp = *pTmpRsp;
10651 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10652
10653 /* Write the qword the lazy way. */
10654 uint64_t *pu64Dst;
10655 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10656 if (rc == VINF_SUCCESS)
10657 {
10658 *pu64Dst = u64Value;
10659 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10660 }
10661
10662 /* Commit the new RSP value unless an access handler made trouble. */
10663 if (rc == VINF_SUCCESS)
10664 *pTmpRsp = NewRsp;
10665
10666 return rc;
10667}
10668
10669
10670/**
10671 * Pops a word from the stack, using a temporary stack pointer.
10672 *
10673 * @returns Strict VBox status code.
10674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10675 * @param pu16Value Where to store the popped value.
10676 * @param pTmpRsp Pointer to the temporary stack pointer.
10677 */
10678IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10679{
10680 /* Increment the stack pointer. */
10681 RTUINT64U NewRsp = *pTmpRsp;
10682 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10683
10684 /* Read the word the lazy way. */
10685 uint16_t const *pu16Src;
10686 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10687 if (rc == VINF_SUCCESS)
10688 {
10689 *pu16Value = *pu16Src;
10690 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10691
10692 /* Commit the new RSP value. */
10693 if (rc == VINF_SUCCESS)
10694 *pTmpRsp = NewRsp;
10695 }
10696
10697 return rc;
10698}
10699
10700
10701/**
10702 * Pops a dword from the stack, using a temporary stack pointer.
10703 *
10704 * @returns Strict VBox status code.
10705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10706 * @param pu32Value Where to store the popped value.
10707 * @param pTmpRsp Pointer to the temporary stack pointer.
10708 */
10709IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10710{
10711 /* Increment the stack pointer. */
10712 RTUINT64U NewRsp = *pTmpRsp;
10713 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10714
10715 /* Read the dword the lazy way. */
10716 uint32_t const *pu32Src;
10717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10718 if (rc == VINF_SUCCESS)
10719 {
10720 *pu32Value = *pu32Src;
10721 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10722
10723 /* Commit the new RSP value. */
10724 if (rc == VINF_SUCCESS)
10725 *pTmpRsp = NewRsp;
10726 }
10727
10728 return rc;
10729}
10730
10731
10732/**
10733 * Pops a qword from the stack, using a temporary stack pointer.
10734 *
10735 * @returns Strict VBox status code.
10736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10737 * @param pu64Value Where to store the popped value.
10738 * @param pTmpRsp Pointer to the temporary stack pointer.
10739 */
10740IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10741{
10742 /* Increment the stack pointer. */
10743 RTUINT64U NewRsp = *pTmpRsp;
10744 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10745
10746    /* Read the qword the lazy way. */
10747 uint64_t const *pu64Src;
10748 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10749 if (rcStrict == VINF_SUCCESS)
10750 {
10751 *pu64Value = *pu64Src;
10752 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10753
10754 /* Commit the new RSP value. */
10755 if (rcStrict == VINF_SUCCESS)
10756 *pTmpRsp = NewRsp;
10757 }
10758
10759 return rcStrict;
10760}
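
/*
 * A minimal usage sketch of the *Ex stack helpers above, assuming a
 * hypothetical caller (uValue1 and uValue2 are illustration-only locals):
 *
 *      RTUINT64U TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU64Ex(pVCpu, uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPushU64Ex(pVCpu, uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
 *      return rcStrict;
 *
 * The real RSP is only assigned once every access has succeeded; on failure
 * the guest is left with the original stack pointer.
 */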
10761
10762
10763/**
10764 * Begin a special stack push (used by interrupt, exceptions and such).
10765 *
10766 * This will raise \#SS or \#PF if appropriate.
10767 *
10768 * @returns Strict VBox status code.
10769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10770 * @param cbMem The number of bytes to push onto the stack.
10771 * @param ppvMem Where to return the pointer to the stack memory.
10772 * As with the other memory functions this could be
10773 * direct access or bounce buffered access, so
10774 *                              don't commit the register until the commit call
10775 * succeeds.
10776 * @param puNewRsp Where to return the new RSP value. This must be
10777 * passed unchanged to
10778 * iemMemStackPushCommitSpecial().
10779 */
10780IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10781{
10782 Assert(cbMem < UINT8_MAX);
10783 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10784 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10785}
10786
10787
10788/**
10789 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10790 *
10791 * This will update the rSP.
10792 *
10793 * @returns Strict VBox status code.
10794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10795 * @param pvMem The pointer returned by
10796 * iemMemStackPushBeginSpecial().
10797 * @param uNewRsp The new RSP value returned by
10798 * iemMemStackPushBeginSpecial().
10799 */
10800IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10801{
10802 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10803 if (rcStrict == VINF_SUCCESS)
10804 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10805 return rcStrict;
10806}
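
/*
 * A minimal sketch of the begin/commit protocol, assuming a hypothetical
 * exception dispatcher that pushes an 8 byte frame (uFrameValue is an
 * illustration-only local):
 *
 *      void     *pvStackMem;
 *      uint64_t  uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *(uint64_t *)pvStackMem = uFrameValue;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp);
 *
 * RSP is only updated by the commit call, so a failing mapping or a troubled
 * access handler leaves the guest stack pointer untouched.
 */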
10807
10808
10809/**
10810 * Begin a special stack pop (used by iret, retf and such).
10811 *
10812 * This will raise \#SS or \#PF if appropriate.
10813 *
10814 * @returns Strict VBox status code.
10815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10816 * @param cbMem The number of bytes to pop from the stack.
10817 * @param ppvMem Where to return the pointer to the stack memory.
10818 * @param puNewRsp Where to return the new RSP value. This must be
10819 * assigned to CPUMCTX::rsp manually some time
10820 * after iemMemStackPopDoneSpecial() has been
10821 * called.
10822 */
10823IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10824{
10825 Assert(cbMem < UINT8_MAX);
10826 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10827 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10828}
10829
10830
10831/**
10832 * Continue a special stack pop (used by iret and retf).
10833 *
10834 * This will raise \#SS or \#PF if appropriate.
10835 *
10836 * @returns Strict VBox status code.
10837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10838 * @param cbMem The number of bytes to pop from the stack.
10839 * @param ppvMem Where to return the pointer to the stack memory.
10840 * @param puNewRsp Where to return the new RSP value. This must be
10841 * assigned to CPUMCTX::rsp manually some time
10842 * after iemMemStackPopDoneSpecial() has been
10843 * called.
10844 */
10845IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10846{
10847 Assert(cbMem < UINT8_MAX);
10848 RTUINT64U NewRsp;
10849 NewRsp.u = *puNewRsp;
10850 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10851 *puNewRsp = NewRsp.u;
10852 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10853}
10854
10855
10856/**
10857 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10858 * iemMemStackPopContinueSpecial).
10859 *
10860 * The caller will manually commit the rSP.
10861 *
10862 * @returns Strict VBox status code.
10863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10864 * @param pvMem The pointer returned by
10865 * iemMemStackPopBeginSpecial() or
10866 * iemMemStackPopContinueSpecial().
10867 */
10868IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10869{
10870 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10871}
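
/*
 * A minimal sketch of the begin/done protocol, assuming a hypothetical caller
 * popping an 8 byte frame (uFrameValue is an illustration-only local):
 *
 *      void const *pvStackMem;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uFrameValue = *(uint64_t const *)pvStackMem;
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;
 *
 * As documented above, committing RSP is the caller's job and happens after
 * the done call; iemMemStackPopContinueSpecial() can be used in between for
 * frames that are read in several chunks (iret, retf).
 */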
10872
10873
10874/**
10875 * Fetches a system table byte.
10876 *
10877 * @returns Strict VBox status code.
10878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10879 * @param pbDst Where to return the byte.
10880 * @param iSegReg The index of the segment register to use for
10881 * this access. The base and limits are checked.
10882 * @param GCPtrMem The address of the guest memory.
10883 */
10884IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10885{
10886 /* The lazy approach for now... */
10887 uint8_t const *pbSrc;
10888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10889 if (rc == VINF_SUCCESS)
10890 {
10891 *pbDst = *pbSrc;
10892 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10893 }
10894 return rc;
10895}
10896
10897
10898/**
10899 * Fetches a system table word.
10900 *
10901 * @returns Strict VBox status code.
10902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10903 * @param pu16Dst Where to return the word.
10904 * @param iSegReg The index of the segment register to use for
10905 * this access. The base and limits are checked.
10906 * @param GCPtrMem The address of the guest memory.
10907 */
10908IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10909{
10910 /* The lazy approach for now... */
10911 uint16_t const *pu16Src;
10912 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10913 if (rc == VINF_SUCCESS)
10914 {
10915 *pu16Dst = *pu16Src;
10916 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10917 }
10918 return rc;
10919}
10920
10921
10922/**
10923 * Fetches a system table dword.
10924 *
10925 * @returns Strict VBox status code.
10926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10927 * @param pu32Dst Where to return the dword.
10928 * @param iSegReg The index of the segment register to use for
10929 * this access. The base and limits are checked.
10930 * @param GCPtrMem The address of the guest memory.
10931 */
10932IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10933{
10934 /* The lazy approach for now... */
10935 uint32_t const *pu32Src;
10936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10937 if (rc == VINF_SUCCESS)
10938 {
10939 *pu32Dst = *pu32Src;
10940 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10941 }
10942 return rc;
10943}
10944
10945
10946/**
10947 * Fetches a system table qword.
10948 *
10949 * @returns Strict VBox status code.
10950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10951 * @param pu64Dst Where to return the qword.
10952 * @param iSegReg The index of the segment register to use for
10953 * this access. The base and limits are checked.
10954 * @param GCPtrMem The address of the guest memory.
10955 */
10956IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10957{
10958 /* The lazy approach for now... */
10959 uint64_t const *pu64Src;
10960 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10961 if (rc == VINF_SUCCESS)
10962 {
10963 *pu64Dst = *pu64Src;
10964 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10965 }
10966 return rc;
10967}
10968
10969
10970/**
10971 * Fetches a descriptor table entry with caller specified error code.
10972 *
10973 * @returns Strict VBox status code.
10974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10975 * @param pDesc Where to return the descriptor table entry.
10976 * @param uSel The selector which table entry to fetch.
10977 * @param uXcpt The exception to raise on table lookup error.
10978 * @param uErrorCode The error code associated with the exception.
10979 */
10980IEM_STATIC VBOXSTRICTRC
10981iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10982{
10983 AssertPtr(pDesc);
10984 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10985
10986 /** @todo did the 286 require all 8 bytes to be accessible? */
10987 /*
10988 * Get the selector table base and check bounds.
10989 */
10990 RTGCPTR GCPtrBase;
10991 if (uSel & X86_SEL_LDT)
10992 {
10993 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10994 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10995 {
10996 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10997 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10998 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10999 uErrorCode, 0);
11000 }
11001
11002 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
11003 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
11004 }
11005 else
11006 {
11007 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11008 {
11009 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11010 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11011 uErrorCode, 0);
11012 }
11013 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11014 }
11015
11016 /*
11017 * Read the legacy descriptor and maybe the long mode extensions if
11018 * required.
11019 */
11020 VBOXSTRICTRC rcStrict;
11021 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11022 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11023 else
11024 {
11025 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11026 if (rcStrict == VINF_SUCCESS)
11027 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11028 if (rcStrict == VINF_SUCCESS)
11029 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11030 if (rcStrict == VINF_SUCCESS)
11031 pDesc->Legacy.au16[3] = 0;
11032 else
11033 return rcStrict;
11034 }
11035
11036 if (rcStrict == VINF_SUCCESS)
11037 {
11038 if ( !IEM_IS_LONG_MODE(pVCpu)
11039 || pDesc->Legacy.Gen.u1DescType)
11040 pDesc->Long.au64[1] = 0;
11041 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11042 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11043 else
11044 {
11045 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11046 /** @todo is this the right exception? */
11047 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11048 }
11049 }
11050 return rcStrict;
11051}
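
/*
 * Worked example of the bounds check above, assuming a GDT selector 0x28 with
 * RPL 0 and a GDT limit (cbGdt) of 0x7f: the check compares
 * (0x28 | X86_SEL_RPL_LDT) = 0x2f, i.e. the offset of the last byte of the
 * 8 byte descriptor, against the inclusive limit, so the whole descriptor at
 * offset (0x28 & X86_SEL_MASK) = 0x28 is guaranteed to lie inside the table.
 * The long mode high half is read with the same trick:
 * (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, the offset of
 * the descriptor's upper 8 bytes.
 */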
11052
11053
11054/**
11055 * Fetches a descriptor table entry.
11056 *
11057 * @returns Strict VBox status code.
11058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11059 * @param pDesc Where to return the descriptor table entry.
11060 * @param uSel The selector which table entry to fetch.
11061 * @param uXcpt The exception to raise on table lookup error.
11062 */
11063IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11064{
11065 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11066}
11067
11068
11069/**
11070 * Fakes a long mode stack selector for SS = 0.
11071 *
11072 * @param pDescSs Where to return the fake stack descriptor.
11073 * @param uDpl The DPL we want.
11074 */
11075IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11076{
11077 pDescSs->Long.au64[0] = 0;
11078 pDescSs->Long.au64[1] = 0;
11079 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11080 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11081 pDescSs->Long.Gen.u2Dpl = uDpl;
11082 pDescSs->Long.Gen.u1Present = 1;
11083 pDescSs->Long.Gen.u1Long = 1;
11084}
11085
11086
11087/**
11088 * Marks the selector descriptor as accessed (only non-system descriptors).
11089 *
11090 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11091 * will therefore skip the limit checks.
11092 *
11093 * @returns Strict VBox status code.
11094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11095 * @param uSel The selector.
11096 */
11097IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11098{
11099 /*
11100 * Get the selector table base and calculate the entry address.
11101 */
11102 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11103 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11104 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11105 GCPtr += uSel & X86_SEL_MASK;
11106
11107 /*
11108 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11109 * ugly stuff to avoid this. This will make sure it's an atomic access
11110 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11111 */
11112 VBOXSTRICTRC rcStrict;
11113 uint32_t volatile *pu32;
11114 if ((GCPtr & 3) == 0)
11115 {
11116        /* The normal case, map the 32 bits containing the accessed bit (40). */
11117 GCPtr += 2 + 2;
11118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11119 if (rcStrict != VINF_SUCCESS)
11120 return rcStrict;
11121        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11122 }
11123 else
11124 {
11125 /* The misaligned GDT/LDT case, map the whole thing. */
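        /* Note: the switch below picks a byte offset that restores the 4-byte
           alignment ASMAtomicBitSet requires and subtracts the skipped bytes
           (8 bits each) from the position of the accessed bit (bit 40 of the
           descriptor), so the very same bit is targeted in all four cases. */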
11126 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11127 if (rcStrict != VINF_SUCCESS)
11128 return rcStrict;
11129 switch ((uintptr_t)pu32 & 3)
11130 {
11131 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11132 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11133 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11134 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11135 }
11136 }
11137
11138 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11139}
11140
11141/** @} */
11142
11143
11144/*
11145 * Include the C/C++ implementation of instruction.
11146 */
11147#include "IEMAllCImpl.cpp.h"
11148
11149
11150
11151/** @name "Microcode" macros.
11152 *
11153 * The idea is that we should be able to use the same code to interpret
11154 * instructions as well as to recompile them. Thus this obfuscation.
11155 *
11156 * @{
11157 */
11158#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11159#define IEM_MC_END() }
11160#define IEM_MC_PAUSE() do {} while (0)
11161#define IEM_MC_CONTINUE() do {} while (0)
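
/*
 * A minimal sketch of how these macros compose, assuming a hypothetical
 * decoder fragment written in the style of the opcode decoder templates (it is
 * purely illustrative and not taken from the real decoders):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * In the interpreter these expand to plain C statements; a recompiler could
 * expand the very same source differently, which is the point of the
 * obfuscation.
 */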
11162
11163/** Internal macro. */
11164#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11165 do \
11166 { \
11167 VBOXSTRICTRC rcStrict2 = a_Expr; \
11168 if (rcStrict2 != VINF_SUCCESS) \
11169 return rcStrict2; \
11170 } while (0)
11171
11172
11173#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11174#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11175#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11176#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11177#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11178#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11179#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11180#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11181#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11182 do { \
11183 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11184 return iemRaiseDeviceNotAvailable(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11187 do { \
11188 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11189 return iemRaiseDeviceNotAvailable(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11192 do { \
11193 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11194 return iemRaiseMathFault(pVCpu); \
11195 } while (0)
11196#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11197 do { \
11198 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11199 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11200 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11201 return iemRaiseUndefinedOpcode(pVCpu); \
11202 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11203 return iemRaiseDeviceNotAvailable(pVCpu); \
11204 } while (0)
11205#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11206 do { \
11207 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11208 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11209 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11210 return iemRaiseUndefinedOpcode(pVCpu); \
11211 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11212 return iemRaiseDeviceNotAvailable(pVCpu); \
11213 } while (0)
11214#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11215 do { \
11216 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11217 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11218 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11219 return iemRaiseUndefinedOpcode(pVCpu); \
11220 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11221 return iemRaiseDeviceNotAvailable(pVCpu); \
11222 } while (0)
11223#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11224 do { \
11225 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11226 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11227 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11228 return iemRaiseUndefinedOpcode(pVCpu); \
11229 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11230 return iemRaiseDeviceNotAvailable(pVCpu); \
11231 } while (0)
11232#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11233 do { \
11234 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11235 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11236 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11237 return iemRaiseUndefinedOpcode(pVCpu); \
11238 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11239 return iemRaiseDeviceNotAvailable(pVCpu); \
11240 } while (0)
11241#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11242 do { \
11243 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11244 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11246 return iemRaiseUndefinedOpcode(pVCpu); \
11247 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11248 return iemRaiseDeviceNotAvailable(pVCpu); \
11249 } while (0)
11250#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11251 do { \
11252 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11253 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11254 return iemRaiseUndefinedOpcode(pVCpu); \
11255 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11256 return iemRaiseDeviceNotAvailable(pVCpu); \
11257 } while (0)
11258#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11259 do { \
11260 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11261 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11262 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11263 return iemRaiseUndefinedOpcode(pVCpu); \
11264 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11265 return iemRaiseDeviceNotAvailable(pVCpu); \
11266 } while (0)
11267#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11268 do { \
11269 if (pVCpu->iem.s.uCpl != 0) \
11270 return iemRaiseGeneralProtectionFault0(pVCpu); \
11271 } while (0)
11272#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11273 do { \
11274 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11275 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11276 } while (0)
11277#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11278 do { \
11279 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11280 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11281 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11282 return iemRaiseUndefinedOpcode(pVCpu); \
11283 } while (0)
11284#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11285 do { \
11286 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11287 return iemRaiseGeneralProtectionFault0(pVCpu); \
11288 } while (0)
11289
11290
11291#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11292#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11293#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11294#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11295#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11296#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11297#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11298 uint32_t a_Name; \
11299 uint32_t *a_pName = &a_Name
11300#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11301 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11302
11303#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11304#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11305
11306#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11320#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11321#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11322#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11323#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11324 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11325 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11326 } while (0)
11327#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11328 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11329 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11330 } while (0)
11331#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11332 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11333 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11334 } while (0)
11335/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11336#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11337 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11338 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11339 } while (0)
11340#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11341 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11342 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11343 } while (0)
11344/** @note Not for IOPL or IF testing or modification. */
11345#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11346#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11347#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11348#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11349
11350#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11351#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11352#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11353#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11354#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11355#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11356#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11357#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11358#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11359#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11360/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11361#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11362 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11363 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11364 } while (0)
11365#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11366 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11367 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11368 } while (0)
11369#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11370 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11371
11372
11373#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11374#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11375/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11376 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11377#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11378#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11379/** @note Not for IOPL or IF testing or modification. */
11380#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11381
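/*
 * A minimal sketch of the argument/local plumbing, assuming a hypothetical
 * two-operand worker: pfnAImplAddU16 stands in for one of the assembly
 * helpers, and IEM_MC_CALL_VOID_AIMPL_3 is defined further down in this file.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *      IEM_MC_ARG(uint16_t,   u16Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *      IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnAImplAddU16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */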
11382#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11383#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11384#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11385 do { \
11386 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11387 *pu32Reg += (a_u32Value); \
11388        pu32Reg[1] = 0; /* implicitly clear the upper 32 bits. */ \
11389 } while (0)
11390#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11391
11392#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11393#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11394#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11395 do { \
11396 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11397 *pu32Reg -= (a_u32Value); \
11398        pu32Reg[1] = 0; /* implicitly clear the upper 32 bits. */ \
11399 } while (0)
11400#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11401#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11402
11403#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11404#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11405#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11406#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11407#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11408#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11409#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11410
11411#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11412#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11413#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11414#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11415
11416#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11417#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11418#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11419
11420#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11421#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11422#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11423
11424#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11425#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11426#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11427
11428#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11429#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11430#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11431
11432#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11433
11434#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11435
11436#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11437#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11438#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11439 do { \
11440 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11441 *pu32Reg &= (a_u32Value); \
11442        pu32Reg[1] = 0; /* implicitly clear the upper 32 bits. */ \
11443 } while (0)
11444#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11445
11446#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11447#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11448#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11449 do { \
11450 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11451 *pu32Reg |= (a_u32Value); \
11452        pu32Reg[1] = 0; /* implicitly clear the upper 32 bits. */ \
11453 } while (0)
11454#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11455
11456
11457/** @note Not for IOPL or IF modification. */
11458#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11459/** @note Not for IOPL or IF modification. */
11460#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11461/** @note Not for IOPL or IF modification. */
11462#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11463
11464#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11465
11466/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11467#define IEM_MC_FPU_TO_MMX_MODE() do { \
11468 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11469 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11470 } while (0)
11471
11472/** Switches the FPU state from MMX mode (FTW=0xffff). */
11473#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11474 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11475 } while (0)
11476
11477#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11478 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11479#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11480 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11481#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11482 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11483 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11484 } while (0)
11485#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11486 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11487 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11488 } while (0)
11489#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11490 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11491#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11492 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11493#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11494 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11495
11496#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11497 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11498 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11499 } while (0)
11500#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11501 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11502#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11503 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11504#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11505 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11506#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11507 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11508 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11509 } while (0)
11510#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11511 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11512#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11513 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11514 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11515 } while (0)
11516#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11517 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11518#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11519 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11520 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11521 } while (0)
11522#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11523 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11524#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11525 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11526#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11527 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11528#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11529 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11530#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11531 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11532 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11533 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11534 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11535 } while (0)
11536
11537#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11538 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11539 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11540 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11541 } while (0)
11542#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11543 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 } while (0)
11547#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11550 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11551 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11552 } while (0)
11553#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11554 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11555 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11556 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11557 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11558 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11559 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11560 } while (0)
11561
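/*
 * The _ZX_VLMAX store/copy/merge macros below implement the VEX convention
 * that writing an XMM or YMM register also zeroes the untouched upper part of
 * the full vector register; IEM_MC_INT_CLEAR_ZMM_256_UP is the (currently
 * empty) hook for extending this to wider registers.
 */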
11562#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11563#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11564 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11565 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11572 } while (0)
11573#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11574 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11575 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11580 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11581 } while (0)
11582#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11583 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11584 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11592 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11593 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11599 } while (0)
11600
11601#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11602 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11603#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11604 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11605#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11606 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11607#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11608 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11609 uintptr_t const iYRegTmp = (a_iYReg); \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11613 } while (0)
11614
11615#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11616 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11617 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11618 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11623 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11624 } while (0)
11625#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11626 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11627 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11628 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11633 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11634 } while (0)
11635#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11636 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11637 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11638 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11641 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11643 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11644 } while (0)
11645
11646#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11647 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11648 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11649 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11650 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11651 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11653 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11655 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11656 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11657 } while (0)
11658#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11659 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11660 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11661 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11662 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11664 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11666 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11667 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11668 } while (0)
11669#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11670 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11671 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11672 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11673 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11674 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11675 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11676 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11677 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11678 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11679 } while (0)
11680#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11681 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11682 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11683 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11684 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11685 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11686 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11687 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11688 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11689 } while (0)
11690
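/*
 * The memory fetch wrappers below come in two flavours: without
 * IEM_WITH_SETJMP they call the status-code returning fetchers and bail out
 * via IEM_MC_RETURN_ON_FAILURE(), while with IEM_WITH_SETJMP they call the
 * *Jmp variants, which longjmp out of the instruction on failure instead.
 */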
11691#ifndef IEM_WITH_SETJMP
11692# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11696# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11698#else
11699# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11700 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11702 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11703# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11704 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11705#endif
11706
11707#ifndef IEM_WITH_SETJMP
11708# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11710# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11712# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11714#else
11715# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11718 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11719# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721#endif
11722
11723#ifndef IEM_WITH_SETJMP
11724# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11728# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11730#else
11731# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11732 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11734 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11735# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11736 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11737#endif
11738
11739#ifdef SOME_UNUSED_FUNCTION
11740# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11742#endif
11743
11744#ifndef IEM_WITH_SETJMP
11745# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11749# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11753#else
11754# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11757 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11758# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762#endif
11763
11764#ifndef IEM_WITH_SETJMP
11765# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11771#else
11772# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11773 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11774# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11775 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11776# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11777 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11778#endif
11779
11780#ifndef IEM_WITH_SETJMP
11781# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11785#else
11786# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11787 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11788# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11789 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11790#endif
11791
11792#ifndef IEM_WITH_SETJMP
11793# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11797#else
11798# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11799 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11800# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11801 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11802#endif
11803
11804
11805
11806#ifndef IEM_WITH_SETJMP
11807# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11808 do { \
11809 uint8_t u8Tmp; \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11811 (a_u16Dst) = u8Tmp; \
11812 } while (0)
11813# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11814 do { \
11815 uint8_t u8Tmp; \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11817 (a_u32Dst) = u8Tmp; \
11818 } while (0)
11819# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11820 do { \
11821 uint8_t u8Tmp; \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11823 (a_u64Dst) = u8Tmp; \
11824 } while (0)
11825# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11826 do { \
11827 uint16_t u16Tmp; \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11829 (a_u32Dst) = u16Tmp; \
11830 } while (0)
11831# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11832 do { \
11833 uint16_t u16Tmp; \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11835 (a_u64Dst) = u16Tmp; \
11836 } while (0)
11837# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11838 do { \
11839 uint32_t u32Tmp; \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11841 (a_u64Dst) = u32Tmp; \
11842 } while (0)
11843#else /* IEM_WITH_SETJMP */
11844# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856#endif /* IEM_WITH_SETJMP */
11857
11858#ifndef IEM_WITH_SETJMP
11859# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11860 do { \
11861 uint8_t u8Tmp; \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11863 (a_u16Dst) = (int8_t)u8Tmp; \
11864 } while (0)
11865# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11866 do { \
11867 uint8_t u8Tmp; \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11869 (a_u32Dst) = (int8_t)u8Tmp; \
11870 } while (0)
11871# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11872 do { \
11873 uint8_t u8Tmp; \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11875 (a_u64Dst) = (int8_t)u8Tmp; \
11876 } while (0)
11877# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11878 do { \
11879 uint16_t u16Tmp; \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11881 (a_u32Dst) = (int16_t)u16Tmp; \
11882 } while (0)
11883# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11884 do { \
11885 uint16_t u16Tmp; \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11887 (a_u64Dst) = (int16_t)u16Tmp; \
11888 } while (0)
11889# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11890 do { \
11891 uint32_t u32Tmp; \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11893 (a_u64Dst) = (int32_t)u32Tmp; \
11894 } while (0)
11895#else /* IEM_WITH_SETJMP */
11896# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11897 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11898# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11899 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11900# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11901 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11902# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11903 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11904# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11905 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11906# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11907 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11908#endif /* IEM_WITH_SETJMP */
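/* Note on the widening fetches above (illustrative, not part of the original
 * source): the _ZX_ variants zero extend by plain unsigned assignment, while
 * the _SX_ variants cast to the narrower signed type first so the implicit
 * conversion to the wider destination sign extends.  With IEM_WITH_SETJMP
 * defined and GCPtrMem standing in for any effective address, for example:
 *
 *     uint64_t u64Dst;
 *     IEM_MC_FETCH_MEM_U8_SX_U64(u64Dst, X86_SREG_DS, GCPtrMem);
 *     // expands to: u64Dst = (int8_t)iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem)
 *     // so a fetched 0x80 ends up as UINT64_C(0xffffffffffffff80).
 */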
11909
11910#ifndef IEM_WITH_SETJMP
11911# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11913# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11915# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11917# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11919#else
11920# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11921 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11922# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11923 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11924# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11925 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11926# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11927 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11928#endif
11929
11930#ifndef IEM_WITH_SETJMP
11931# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11933# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11935# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11937# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11938 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11939#else
11940# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11941 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11942# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11943 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11944# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11945 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11946# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11947 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11948#endif
11949
11950#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11951#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11952#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11953#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11954#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11955#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11956#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11957 do { \
11958 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11959 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11960 } while (0)
11961
11962#ifndef IEM_WITH_SETJMP
11963# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11965# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11967#else
11968# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11969 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11970# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11971 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11972#endif
11973
11974#ifndef IEM_WITH_SETJMP
11975# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11977# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11979#else
11980# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11981 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11982# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11983 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11984#endif
11985
11986
11987#define IEM_MC_PUSH_U16(a_u16Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11989#define IEM_MC_PUSH_U32(a_u32Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11991#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11993#define IEM_MC_PUSH_U64(a_u64Value) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11995
11996#define IEM_MC_POP_U16(a_pu16Value) \
11997 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11998#define IEM_MC_POP_U32(a_pu32Value) \
11999 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
12000#define IEM_MC_POP_U64(a_pu64Value) \
12001 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
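/* Illustrative usage sketch (not part of the original source): a PUSH r16
 * style instruction built from the stack helpers above.  IEM_MC_BEGIN,
 * IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16, IEM_MC_ADVANCE_RIP and IEM_MC_END are
 * assumed from elsewhere in this file; iReg is the decoded register index.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */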
12002
12003/** Maps guest memory for direct or bounce buffered access.
12004 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12005 * @remarks May return.
12006 */
12007#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12008 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12009
12010/** Maps guest memory for direct or bounce buffered access.
12011 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12012 * @remarks May return.
12013 */
12014#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12015 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12016
12017/** Commits the memory and unmaps the guest memory.
12018 * @remarks May return.
12019 */
12020#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12021 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12022
12023/** Commits the memory and unmaps the guest memory unless the FPU status word
12024 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12025 * would cause FLD not to store.
12026 *
12027 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12028 * store, while \#P will not.
12029 *
12030 * @remarks May in theory return - for now.
12031 */
12032#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12033 do { \
12034 if ( !(a_u16FSW & X86_FSW_ES) \
12035 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12036 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12037 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12038 } while (0)
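/* Illustrative usage sketch (not part of the original source): the usual
 * read-modify-write pattern built from the mapping helpers above: map the
 * operand, hand the pointer to a worker, then commit and unmap.  The worker
 * name and operand variables are hypothetical; any of the steps may return a
 * strict status code.
 *
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_2(pfnWorkerU32, pu32Dst, u32Src);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */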
12039
12040/** Calculate the effective address from R/M. */
12041#ifndef IEM_WITH_SETJMP
12042# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12043 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12044#else
12045# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12046 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12047#endif
12048
12049#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12050#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12051#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12052#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12053#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12054#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12055#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12056
12057/**
12058 * Defers the rest of the instruction emulation to a C implementation routine
12059 * and returns, only taking the standard parameters.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12063 */
12064#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12065
12066/**
12067 * Defers the rest of instruction emulation to a C implementation routine and
12068 * returns, taking one argument in addition to the standard ones.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The argument.
12072 */
12073#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12074
12075/**
12076 * Defers the rest of the instruction emulation to a C implementation routine
12077 * and returns, taking two arguments in addition to the standard ones.
12078 *
12079 * @param a_pfnCImpl The pointer to the C routine.
12080 * @param a0 The first extra argument.
12081 * @param a1 The second extra argument.
12082 */
12083#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12084
12085/**
12086 * Defers the rest of the instruction emulation to a C implementation routine
12087 * and returns, taking three arguments in addition to the standard ones.
12088 *
12089 * @param a_pfnCImpl The pointer to the C routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 */
12094#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12095
12096/**
12097 * Defers the rest of the instruction emulation to a C implementation routine
12098 * and returns, taking four arguments in addition to the standard ones.
12099 *
12100 * @param a_pfnCImpl The pointer to the C routine.
12101 * @param a0 The first extra argument.
12102 * @param a1 The second extra argument.
12103 * @param a2 The third extra argument.
12104 * @param a3 The fourth extra argument.
12105 */
12106#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12107
12108/**
12109 * Defers the rest of the instruction emulation to a C implementation routine
12110 * and returns, taking five arguments in addition to the standard ones.
12111 *
12112 * @param a_pfnCImpl The pointer to the C routine.
12113 * @param a0 The first extra argument.
12114 * @param a1 The second extra argument.
12115 * @param a2 The third extra argument.
12116 * @param a3 The fourth extra argument.
12117 * @param a4 The fifth extra argument.
12118 */
12119#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12120
12121/**
12122 * Defers the entire instruction emulation to a C implementation routine and
12123 * returns, only taking the standard parameters.
12124 *
12125 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12126 *
12127 * @param a_pfnCImpl The pointer to the C routine.
12128 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12129 */
12130#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12131
12132/**
12133 * Defers the entire instruction emulation to a C implementation routine and
12134 * returns, taking one argument in addition to the standard ones.
12135 *
12136 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12137 *
12138 * @param a_pfnCImpl The pointer to the C routine.
12139 * @param a0 The argument.
12140 */
12141#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12142
12143/**
12144 * Defers the entire instruction emulation to a C implementation routine and
12145 * returns, taking two arguments in addition to the standard ones.
12146 *
12147 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12148 *
12149 * @param a_pfnCImpl The pointer to the C routine.
12150 * @param a0 The first extra argument.
12151 * @param a1 The second extra argument.
12152 */
12153#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12154
12155/**
12156 * Defers the entire instruction emulation to a C implementation routine and
12157 * returns, taking three arguments in addition to the standard ones.
12158 *
12159 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12160 *
12161 * @param a_pfnCImpl The pointer to the C routine.
12162 * @param a0 The first extra argument.
12163 * @param a1 The second extra argument.
12164 * @param a2 The third extra argument.
12165 */
12166#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
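/* Illustrative usage sketch (not part of the original source): a decode-only
 * opcode function handing all of the work to a C implementation.  FNIEMOP_DEF
 * comes from elsewhere in this file and
 * IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX is defined further down; the
 * instruction and CImpl names are hypothetical.
 *
 *     FNIEMOP_DEF(iemOp_ExampleInsn)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_ExampleInsn);
 *     }
 */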
12167
12168/**
12169 * Calls a FPU assembly implementation taking one visible argument.
12170 *
12171 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12172 * @param a0 The first extra argument.
12173 */
12174#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12175 do { \
12176 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12177 } while (0)
12178
12179/**
12180 * Calls a FPU assembly implementation taking two visible arguments.
12181 *
12182 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12183 * @param a0 The first extra argument.
12184 * @param a1 The second extra argument.
12185 */
12186#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12187 do { \
12188 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12189 } while (0)
12190
12191/**
12192 * Calls a FPU assembly implementation taking three visible arguments.
12193 *
12194 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12195 * @param a0 The first extra argument.
12196 * @param a1 The second extra argument.
12197 * @param a2 The third extra argument.
12198 */
12199#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12200 do { \
12201 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12202 } while (0)
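/* Illustrative usage sketch (not part of the original source): a typical
 * ST0/STi FPU arithmetic sequence combining the call and store helpers.  The
 * IEM_MC_LOCAL/IEM_MC_ARG declarations for FpuRes, pFpuRes, pr80Value1 and
 * pr80Value2 are elided, pfnAImpl is a hypothetical worker, and the IF/ELSE
 * macros used here are defined further down in this section.
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *         IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 */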
12203
12204#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12205 do { \
12206 (a_FpuData).FSW = (a_FSW); \
12207 (a_FpuData).r80Result = *(a_pr80Value); \
12208 } while (0)
12209
12210/** Pushes FPU result onto the stack. */
12211#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12212 iemFpuPushResult(pVCpu, &a_FpuData)
12213/** Pushes FPU result onto the stack and sets the FPUDP. */
12214#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12215 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12216
12217/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12218#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12219 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12220
12221/** Stores FPU result in a stack register. */
12222#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12223 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12224/** Stores FPU result in a stack register and pops the stack. */
12225#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12226 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12227/** Stores FPU result in a stack register and sets the FPUDP. */
12228#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12229 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12230/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12231 * stack. */
12232#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12233 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12234
12235/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12236#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12237 iemFpuUpdateOpcodeAndIp(pVCpu)
12238/** Free a stack register (for FFREE and FFREEP). */
12239#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12240 iemFpuStackFree(pVCpu, a_iStReg)
12241/** Increment the FPU stack pointer. */
12242#define IEM_MC_FPU_STACK_INC_TOP() \
12243 iemFpuStackIncTop(pVCpu)
12244/** Decrement the FPU stack pointer. */
12245#define IEM_MC_FPU_STACK_DEC_TOP() \
12246 iemFpuStackDecTop(pVCpu)
12247
12248/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12249#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12250 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12251/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12252#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12253 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12254/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12255#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12256 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12257/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12258#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12259 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12260/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12261 * stack. */
12262#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12263 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12264/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12265#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12266 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12267
12268/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12269#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12270 iemFpuStackUnderflow(pVCpu, a_iStDst)
12271/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12272 * stack. */
12273#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12274 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12275/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12276 * FPUDS. */
12277#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12278 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12279/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12280 * FPUDS. Pops stack. */
12281#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12282 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12283/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12284 * stack twice. */
12285#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12286 iemFpuStackUnderflowThenPopPop(pVCpu)
12287/** Raises a FPU stack underflow exception for an instruction pushing a result
12288 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12289#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12290 iemFpuStackPushUnderflow(pVCpu)
12291/** Raises a FPU stack underflow exception for an instruction pushing a result
12292 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12293#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12294 iemFpuStackPushUnderflowTwo(pVCpu)
12295
12296/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12297 * FPUIP, FPUCS and FOP. */
12298#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12299 iemFpuStackPushOverflow(pVCpu)
12300/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12301 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12302#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12303 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12304/** Prepares for using the FPU state.
12305 * Ensures that we can use the host FPU in the current context (RC+R0).
12306 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12307#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12308/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12309#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12310/** Actualizes the guest FPU state so it can be accessed and modified. */
12311#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12312
12313/** Prepares for using the SSE state.
12314 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12315 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12316#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12317/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12318#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12319/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12320#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12321
12322/** Prepares for using the AVX state.
12323 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12324 * Ensures the guest AVX state in the CPUMCTX is up to date.
12325 * @note This will include the AVX512 state too when support for it is added
12326 * due to the zero extending feature of VEX instructions. */
12327#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12328/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12329#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12330/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12331#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12332
12333/**
12334 * Calls a MMX assembly implementation taking two visible arguments.
12335 *
12336 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12337 * @param a0 The first extra argument.
12338 * @param a1 The second extra argument.
12339 */
12340#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12341 do { \
12342 IEM_MC_PREPARE_FPU_USAGE(); \
12343 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12344 } while (0)
12345
12346/**
12347 * Calls a MMX assembly implementation taking three visible arguments.
12348 *
12349 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12350 * @param a0 The first extra argument.
12351 * @param a1 The second extra argument.
12352 * @param a2 The third extra argument.
12353 */
12354#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12355 do { \
12356 IEM_MC_PREPARE_FPU_USAGE(); \
12357 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12358 } while (0)
12359
12360
12361/**
12362 * Calls a SSE assembly implementation taking two visible arguments.
12363 *
12364 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12365 * @param a0 The first extra argument.
12366 * @param a1 The second extra argument.
12367 */
12368#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12369 do { \
12370 IEM_MC_PREPARE_SSE_USAGE(); \
12371 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12372 } while (0)
12373
12374/**
12375 * Calls a SSE assembly implementation taking three visible arguments.
12376 *
12377 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12378 * @param a0 The first extra argument.
12379 * @param a1 The second extra argument.
12380 * @param a2 The third extra argument.
12381 */
12382#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12383 do { \
12384 IEM_MC_PREPARE_SSE_USAGE(); \
12385 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12386 } while (0)
12387
12388
12389/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12390 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12391#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12392 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12393
12394/**
12395 * Calls an AVX assembly implementation taking two visible arguments.
12396 *
12397 * There is one implicit zeroth argument, a pointer to the extended state.
12398 *
12399 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12400 * @param a1 The first extra argument.
12401 * @param a2 The second extra argument.
12402 */
12403#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12404 do { \
12405 IEM_MC_PREPARE_AVX_USAGE(); \
12406 a_pfnAImpl(pXState, (a1), (a2)); \
12407 } while (0)
12408
12409/**
12410 * Calls an AVX assembly implementation taking three visible arguments.
12411 *
12412 * There is one implicit zeroth argument, a pointer to the extended state.
12413 *
12414 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12415 * @param a1 The first extra argument.
12416 * @param a2 The second extra argument.
12417 * @param a3 The third extra argument.
12418 */
12419#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12420 do { \
12421 IEM_MC_PREPARE_AVX_USAGE(); \
12422 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12423 } while (0)
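/* Illustrative usage sketch (not part of the original source): the AVX call
 * macros expect IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() to have declared the implicit
 * pXState argument in the same block.  IEM_MC_BEGIN, IEM_MC_ARG,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END are assumed from elsewhere in this file;
 * the worker and the register index arguments are hypothetical.
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *     IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *     IEM_MC_CALL_AVX_AIMPL_2(pfnWorker, iYRegDst, iYRegSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */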
12424
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12429/** @note Not for IOPL or IF testing. */
12430#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12433/** @note Not for IOPL or IF testing. */
12434#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12435 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12436 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12437/** @note Not for IOPL or IF testing. */
12438#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12439 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12440 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12441/** @note Not for IOPL or IF testing. */
12442#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12443 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12444 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12445 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12446/** @note Not for IOPL or IF testing. */
12447#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12448 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12449 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12450 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12451#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12452#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12453#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12454/** @note Not for IOPL or IF testing. */
12455#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12456 if ( pVCpu->cpum.GstCtx.cx != 0 \
12457 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12458/** @note Not for IOPL or IF testing. */
12459#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12460 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12461 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12462/** @note Not for IOPL or IF testing. */
12463#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12464 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12465 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12466/** @note Not for IOPL or IF testing. */
12467#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12468 if ( pVCpu->cpum.GstCtx.cx != 0 \
12469 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12470/** @note Not for IOPL or IF testing. */
12471#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12472 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12473 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12474/** @note Not for IOPL or IF testing. */
12475#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12476 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12477 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12478#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12479#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12480
12481#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12482 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12483#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12484 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12485#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12486 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12487#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12488 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12489#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12490 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12491#define IEM_MC_IF_FCW_IM() \
12492 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12493
12494#define IEM_MC_ELSE() } else {
12495#define IEM_MC_ENDIF() } do {} while (0)
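/* Illustrative usage sketch (not part of the original source): the IEM_MC_IF_*
 * macros above open a brace that IEM_MC_ELSE()/IEM_MC_ENDIF() close, so every
 * IF must be paired with an ENDIF.  A je rel8 style sequence would look
 * roughly like this (IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP assumed from
 * elsewhere in this file):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */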
12496
12497/** @} */
12498
12499
12500/** @name Opcode Debug Helpers.
12501 * @{
12502 */
12503#ifdef VBOX_WITH_STATISTICS
12504# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12505#else
12506# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12507#endif
12508
12509#ifdef DEBUG
12510# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12511 do { \
12512 IEMOP_INC_STATS(a_Stats); \
12513 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12514 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12515 } while (0)
12516
12517# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12518 do { \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12520 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12521 (void)RT_CONCAT(OP_,a_Upper); \
12522 (void)(a_fDisHints); \
12523 (void)(a_fIemHints); \
12524 } while (0)
12525
12526# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12527 do { \
12528 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12529 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12530 (void)RT_CONCAT(OP_,a_Upper); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12532 (void)(a_fDisHints); \
12533 (void)(a_fIemHints); \
12534 } while (0)
12535
12536# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12537 do { \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12539 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12540 (void)RT_CONCAT(OP_,a_Upper); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12543 (void)(a_fDisHints); \
12544 (void)(a_fIemHints); \
12545 } while (0)
12546
12547# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12548 do { \
12549 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12550 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12551 (void)RT_CONCAT(OP_,a_Upper); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12555 (void)(a_fDisHints); \
12556 (void)(a_fIemHints); \
12557 } while (0)
12558
12559# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12560 do { \
12561 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12562 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12563 (void)RT_CONCAT(OP_,a_Upper); \
12564 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12565 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12566 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12567 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12568 (void)(a_fDisHints); \
12569 (void)(a_fIemHints); \
12570 } while (0)
12571
12572#else
12573# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12574
12575# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12577# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12579# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12580 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12581# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12583# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12584 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12585
12586#endif
12587
12588#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12589 IEMOP_MNEMONIC0EX(a_Lower, \
12590 #a_Lower, \
12591 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12592#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12593 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12594 #a_Lower " " #a_Op1, \
12595 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12596#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12597 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12598 #a_Lower " " #a_Op1 "," #a_Op2, \
12599 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12600#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12601 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12602 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12603 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12604#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12605 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12606 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12607 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
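/* Example (illustrative, not part of the original source): the wrappers above
 * derive both the statistics member and the log string from the mnemonic and
 * operand forms, so a use such as
 *
 *     IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * increments pStats->add_Gv_Ev in statistics builds, logs "add Gv,Ev" at Log4
 * level in debug builds, and compile-time references IEMOPFORM_RM, OP_ADD,
 * OP_PARM_Gv and OP_PARM_Ev so typos in the form or operands are caught.
 */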
12608
12609/** @} */
12610
12611
12612/** @name Opcode Helpers.
12613 * @{
12614 */
12615
12616#ifdef IN_RING3
12617# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12618 do { \
12619 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12620 else \
12621 { \
12622 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12623 return IEMOP_RAISE_INVALID_OPCODE(); \
12624 } \
12625 } while (0)
12626#else
12627# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12628 do { \
12629 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12630 else return IEMOP_RAISE_INVALID_OPCODE(); \
12631 } while (0)
12632#endif
12633
12634/** The instruction requires a 186 or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12636# define IEMOP_HLP_MIN_186() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12639#endif
12640
12641/** The instruction requires a 286 or later. */
12642#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12643# define IEMOP_HLP_MIN_286() do { } while (0)
12644#else
12645# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12646#endif
12647
12648/** The instruction requires a 386 or later. */
12649#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12650# define IEMOP_HLP_MIN_386() do { } while (0)
12651#else
12652# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12653#endif
12654
12655/** The instruction requires a 386 or later if the given expression is true. */
12656#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12657# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12658#else
12659# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12660#endif
12661
12662/** The instruction requires a 486 or later. */
12663#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12664# define IEMOP_HLP_MIN_486() do { } while (0)
12665#else
12666# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12667#endif
12668
12669/** The instruction requires a Pentium (586) or later. */
12670#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12671# define IEMOP_HLP_MIN_586() do { } while (0)
12672#else
12673# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12674#endif
12675
12676/** The instruction requires a PentiumPro (686) or later. */
12677#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12678# define IEMOP_HLP_MIN_686() do { } while (0)
12679#else
12680# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12681#endif
12682
12683
12684/** The instruction raises an \#UD in real and V8086 mode. */
12685#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12686 do \
12687 { \
12688 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12689 else return IEMOP_RAISE_INVALID_OPCODE(); \
12690 } while (0)
12691
12692#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12693/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12694 * without a 64-bit code segment (applicable to all VMX instructions except
12695 * VMCALL).
12696 */
12697#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12698 do \
12699 { \
12700 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12701 && ( !IEM_IS_LONG_MODE(pVCpu) \
12702 || IEM_IS_64BIT_CODE(pVCpu))) \
12703 { /* likely */ } \
12704 else \
12705 { \
12706 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12707 { \
12708 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12709 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12710 return IEMOP_RAISE_INVALID_OPCODE(); \
12711 } \
12712 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12713 { \
12714 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12715 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12716 return IEMOP_RAISE_INVALID_OPCODE(); \
12717 } \
12718 } \
12719 } while (0)
12720
12721/** The instruction can only be executed in VMX operation (VMX root mode and
12722 * non-root mode).
12723 *
12724 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12725 */
12726# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12727 do \
12728 { \
12729 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12730 else \
12731 { \
12732 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12733 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12734 return IEMOP_RAISE_INVALID_OPCODE(); \
12735 } \
12736 } while (0)
12737#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12738
12739/** The instruction is not available in 64-bit mode; throw \#UD if we're in
12740 * 64-bit mode. */
12741#define IEMOP_HLP_NO_64BIT() \
12742 do \
12743 { \
12744 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12745 return IEMOP_RAISE_INVALID_OPCODE(); \
12746 } while (0)
12747
12748/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
12749 * 64-bit mode. */
12750#define IEMOP_HLP_ONLY_64BIT() \
12751 do \
12752 { \
12753 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12754 return IEMOP_RAISE_INVALID_OPCODE(); \
12755 } while (0)
12756
12757/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12758#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12759 do \
12760 { \
12761 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12762 iemRecalEffOpSize64Default(pVCpu); \
12763 } while (0)
12764
12765/** The instruction has 64-bit operand size if 64-bit mode. */
12766#define IEMOP_HLP_64BIT_OP_SIZE() \
12767 do \
12768 { \
12769 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12770 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12771 } while (0)
12772
12773/** Only a REX prefix immediately preceding the first opcode byte takes
12774 * effect. This macro helps ensure this, as well as logging bad guest code. */
12775#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12776 do \
12777 { \
12778 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12779 { \
12780 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12781 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12782 pVCpu->iem.s.uRexB = 0; \
12783 pVCpu->iem.s.uRexIndex = 0; \
12784 pVCpu->iem.s.uRexReg = 0; \
12785 iemRecalEffOpSize(pVCpu); \
12786 } \
12787 } while (0)
12788
12789/**
12790 * Done decoding.
12791 */
12792#define IEMOP_HLP_DONE_DECODING() \
12793 do \
12794 { \
12795 /*nothing for now, maybe later... */ \
12796 } while (0)
12797
12798/**
12799 * Done decoding, raise \#UD exception if lock prefix present.
12800 */
12801#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12802 do \
12803 { \
12804 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12805 { /* likely */ } \
12806 else \
12807 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12808 } while (0)
12809
12810
12811/**
12812 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12813 * repnz or size prefixes are present, or if in real or v8086 mode.
12814 */
12815#define IEMOP_HLP_DONE_VEX_DECODING() \
12816 do \
12817 { \
12818 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12819 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12820 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12821 { /* likely */ } \
12822 else \
12823 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12824 } while (0)
12825
12826/**
12827 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12828 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12829 */
12830#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12831 do \
12832 { \
12833 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12834 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12835 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12836 && pVCpu->iem.s.uVexLength == 0)) \
12837 { /* likely */ } \
12838 else \
12839 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12840 } while (0)
12841
12842
12843/**
12844 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12845 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12846 * register 0, or if in real or v8086 mode.
12847 */
12848#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12849 do \
12850 { \
12851 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12852 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12853 && !pVCpu->iem.s.uVex3rdReg \
12854 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12855 { /* likely */ } \
12856 else \
12857 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12858 } while (0)
12859
12860/**
12861 * Done decoding VEX, no V, L=0.
12862 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12863 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12864 */
12865#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12866 do \
12867 { \
12868 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12869 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12870 && pVCpu->iem.s.uVexLength == 0 \
12871 && pVCpu->iem.s.uVex3rdReg == 0 \
12872 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12873 { /* likely */ } \
12874 else \
12875 return IEMOP_RAISE_INVALID_OPCODE(); \
12876 } while (0)
12877
12878#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12879 do \
12880 { \
12881 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12882 { /* likely */ } \
12883 else \
12884 { \
12885 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12886 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12887 } \
12888 } while (0)
12889#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12890 do \
12891 { \
12892 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12893 { /* likely */ } \
12894 else \
12895 { \
12896 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12897 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12898 } \
12899 } while (0)
12900
12901/**
12902 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12903 * are present.
12904 */
12905#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12906 do \
12907 { \
12908 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12909 { /* likely */ } \
12910 else \
12911 return IEMOP_RAISE_INVALID_OPCODE(); \
12912 } while (0)
12913
12914/**
12915 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12916 * prefixes are present.
12917 */
12918#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12919 do \
12920 { \
12921 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12922 { /* likely */ } \
12923 else \
12924 return IEMOP_RAISE_INVALID_OPCODE(); \
12925 } while (0)
12926
12927
12928/**
12929 * Calculates the effective address of a ModR/M memory operand.
12930 *
12931 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12932 *
12933 * @return Strict VBox status code.
12934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12935 * @param bRm The ModRM byte.
12936 * @param cbImm The size of any immediate following the
12937 * effective address opcode bytes. Important for
12938 * RIP relative addressing.
12939 * @param pGCPtrEff Where to return the effective address.
12940 */
12941IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12942{
12943 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12944# define SET_SS_DEF() \
12945 do \
12946 { \
12947 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12948 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12949 } while (0)
12950
12951 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12952 {
12953/** @todo Check the effective address size crap! */
12954 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12955 {
12956 uint16_t u16EffAddr;
12957
12958 /* Handle the disp16 form with no registers first. */
12959 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12960 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12961 else
12962 {
12963 /* Get the displacement. */
12964 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12965 {
12966 case 0: u16EffAddr = 0; break;
12967 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12968 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12969 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12970 }
12971
12972 /* Add the base and index registers to the disp. */
12973 switch (bRm & X86_MODRM_RM_MASK)
12974 {
12975 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12976 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12977 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12978 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12979 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12980 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12981 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12982 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12983 }
12984 }
12985
12986 *pGCPtrEff = u16EffAddr;
12987 }
12988 else
12989 {
12990 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12991 uint32_t u32EffAddr;
12992
12993 /* Handle the disp32 form with no registers first. */
12994 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12995 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12996 else
12997 {
12998 /* Get the register (or SIB) value. */
12999 switch ((bRm & X86_MODRM_RM_MASK))
13000 {
13001 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13002 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13003 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13004 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13005 case 4: /* SIB */
13006 {
13007 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13008
13009 /* Get the index and scale it. */
13010 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13011 {
13012 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13013 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13014 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13015 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13016 case 4: u32EffAddr = 0; /*none */ break;
13017 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13018 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13019 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13020 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13021 }
13022 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13023
13024 /* add base */
13025 switch (bSib & X86_SIB_BASE_MASK)
13026 {
13027 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13028 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13029 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13030 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13031 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13032 case 5:
13033 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13034 {
13035 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13036 SET_SS_DEF();
13037 }
13038 else
13039 {
13040 uint32_t u32Disp;
13041 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13042 u32EffAddr += u32Disp;
13043 }
13044 break;
13045 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13046 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13047 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13048 }
13049 break;
13050 }
13051 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13052 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13053 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13055 }
13056
13057 /* Get and add the displacement. */
13058 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13059 {
13060 case 0:
13061 break;
13062 case 1:
13063 {
13064 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13065 u32EffAddr += i8Disp;
13066 break;
13067 }
13068 case 2:
13069 {
13070 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13071 u32EffAddr += u32Disp;
13072 break;
13073 }
13074 default:
13075 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13076 }
13077
13078 }
13079 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13080 *pGCPtrEff = u32EffAddr;
13081 else
13082 {
13083 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13084 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13085 }
13086 }
13087 }
13088 else
13089 {
13090 uint64_t u64EffAddr;
13091
13092 /* Handle the rip+disp32 form with no registers first. */
13093 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13094 {
13095 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13096 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13097 }
13098 else
13099 {
13100 /* Get the register (or SIB) value. */
13101 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13102 {
13103 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13104 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13105 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13106 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13107 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13108 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13109 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13110 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13111 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13112 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13113 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13114 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13115 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13116 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13117 /* SIB */
13118 case 4:
13119 case 12:
13120 {
13121 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13122
13123 /* Get the index and scale it. */
13124 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13125 {
13126 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13127 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13128 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13129 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13130 case 4: u64EffAddr = 0; /*none */ break;
13131 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13132 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13133 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13134 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13135 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13136 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13137 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13138 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13139 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13140 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13141 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13142 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13143 }
13144 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13145
13146 /* add base */
13147 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13148 {
13149 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13150 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13151 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13152 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13153 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13154 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13155 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13156 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13157 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13158 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13159 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13160 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13161 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13162 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13163 /* complicated encodings */
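                               /* With mod=00 a base of 101b means disp32 and no base register at all
                                  (regardless of REX.B); otherwise REX.B selects between rBP and r13. */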
13164 case 5:
13165 case 13:
13166 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13167 {
13168 if (!pVCpu->iem.s.uRexB)
13169 {
13170 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13171 SET_SS_DEF();
13172 }
13173 else
13174 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13175 }
13176 else
13177 {
13178 uint32_t u32Disp;
13179 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13180 u64EffAddr += (int32_t)u32Disp;
13181 }
13182 break;
13183 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13184 }
13185 break;
13186 }
13187 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13188 }
13189
13190 /* Get and add the displacement. */
13191 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13192 {
13193 case 0:
13194 break;
13195 case 1:
13196 {
13197 int8_t i8Disp;
13198 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13199 u64EffAddr += i8Disp;
13200 break;
13201 }
13202 case 2:
13203 {
13204 uint32_t u32Disp;
13205 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13206 u64EffAddr += (int32_t)u32Disp;
13207 break;
13208 }
13209 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13210 }
13211
13212 }
13213
13214 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13215 *pGCPtrEff = u64EffAddr;
13216 else
13217 {
13218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13219 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13220 }
13221 }
13222
13223 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13224 return VINF_SUCCESS;
13225}
13226
13227
13228/**
13229 * Calculates the effective address of a ModR/M memory operand.
13230 *
13231 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13232 *
13233 * @return Strict VBox status code.
13234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13235 * @param bRm The ModRM byte.
13236 * @param cbImm The size of any immediate following the
13237 * effective address opcode bytes. Important for
13238 * RIP relative addressing.
13239 * @param pGCPtrEff Where to return the effective address.
13240 * @param   offRsp              RSP displacement added when RSP/ESP is used as the SIB base register.
13241 */
13242IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13243{
13244    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13245# define SET_SS_DEF() \
13246 do \
13247 { \
13248 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13249 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13250 } while (0)
13251
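    /*
     * What the decoding below boils down to is the classic x86 effective
     * address formula:
     *      EffAddr = base + index * 2^scale + displacement
     * For example, a hypothetical "mov eax, [ebx + esi*4 + 10h]" gives
     * base=EBX, index=ESI, scale=2 and disp=10h, and SS becomes the default
     * segment whenever xBP or xSP ends up as the base register.
     */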
13252 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13253 {
13254/** @todo Check the effective address size crap! */
13255 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13256 {
13257 uint16_t u16EffAddr;
13258
13259 /* Handle the disp16 form with no registers first. */
13260 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13261 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13262 else
13263 {
13264                /* Get the displacement. */
13265 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13266 {
13267 case 0: u16EffAddr = 0; break;
13268 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13269 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13270 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13271 }
13272
13273 /* Add the base and index registers to the disp. */
13274 switch (bRm & X86_MODRM_RM_MASK)
13275 {
13276 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13277 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13278 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13279 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13280 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13281 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13282 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13283 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13284 }
13285 }
13286
13287 *pGCPtrEff = u16EffAddr;
13288 }
13289 else
13290 {
13291 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13292 uint32_t u32EffAddr;
13293
13294 /* Handle the disp32 form with no registers first. */
13295 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13296 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13297 else
13298 {
13299 /* Get the register (or SIB) value. */
13300 switch ((bRm & X86_MODRM_RM_MASK))
13301 {
13302 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13303 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13304 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13305 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13306 case 4: /* SIB */
13307 {
13308 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13309
13310 /* Get the index and scale it. */
13311 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13312 {
13313 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13314 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13315 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13316 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13317 case 4: u32EffAddr = 0; /*none */ break;
13318 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13319 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13320 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13321 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13322 }
13323 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13324
13325 /* add base */
13326 switch (bSib & X86_SIB_BASE_MASK)
13327 {
13328 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13329 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13330 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13331 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13332 case 4:
13333 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13334 SET_SS_DEF();
13335 break;
13336 case 5:
13337 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13338 {
13339 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13340 SET_SS_DEF();
13341 }
13342 else
13343 {
13344 uint32_t u32Disp;
13345 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13346 u32EffAddr += u32Disp;
13347 }
13348 break;
13349 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13350 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13351 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13352 }
13353 break;
13354 }
13355 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13356 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13357 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13358 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13359 }
13360
13361 /* Get and add the displacement. */
13362 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13363 {
13364 case 0:
13365 break;
13366 case 1:
13367 {
13368 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13369 u32EffAddr += i8Disp;
13370 break;
13371 }
13372 case 2:
13373 {
13374 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13375 u32EffAddr += u32Disp;
13376 break;
13377 }
13378 default:
13379 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13380 }
13381
13382 }
13383 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13384 *pGCPtrEff = u32EffAddr;
13385 else
13386 {
13387 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13388 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13389 }
13390 }
13391 }
13392 else
13393 {
13394 uint64_t u64EffAddr;
13395
13396 /* Handle the rip+disp32 form with no registers first. */
13397 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13398 {
13399 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13400 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13401 }
13402 else
13403 {
13404 /* Get the register (or SIB) value. */
13405 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13406 {
13407 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13408 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13409 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13410 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13411 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13412 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13413 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13414 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13415 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13416 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13417 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13418 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13419 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13420 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13421 /* SIB */
13422 case 4:
13423 case 12:
13424 {
13425 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13426
13427 /* Get the index and scale it. */
13428 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13429 {
13430 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13431 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13432 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13433 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13434 case 4: u64EffAddr = 0; /*none */ break;
13435 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13436 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13437 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13438 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13439 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13440 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13441 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13442 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13443 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13444 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13445 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13446 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13447 }
13448 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13449
13450 /* add base */
13451 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13452 {
13453 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13454 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13455 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13456 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13457 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13458 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13459 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13460 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13461 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13462 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13463 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13464 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13465 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13466 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13467 /* complicated encodings */
13468 case 5:
13469 case 13:
13470 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13471 {
13472 if (!pVCpu->iem.s.uRexB)
13473 {
13474 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13475 SET_SS_DEF();
13476 }
13477 else
13478 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13479 }
13480 else
13481 {
13482 uint32_t u32Disp;
13483 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13484 u64EffAddr += (int32_t)u32Disp;
13485 }
13486 break;
13487 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13488 }
13489 break;
13490 }
13491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13492 }
13493
13494 /* Get and add the displacement. */
13495 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13496 {
13497 case 0:
13498 break;
13499 case 1:
13500 {
13501 int8_t i8Disp;
13502 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13503 u64EffAddr += i8Disp;
13504 break;
13505 }
13506 case 2:
13507 {
13508 uint32_t u32Disp;
13509 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13510 u64EffAddr += (int32_t)u32Disp;
13511 break;
13512 }
13513 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13514 }
13515
13516 }
13517
13518 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13519 *pGCPtrEff = u64EffAddr;
13520 else
13521 {
13522 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13523 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13524 }
13525 }
13526
13527    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13528 return VINF_SUCCESS;
13529}
13530
13531
13532#ifdef IEM_WITH_SETJMP
13533/**
13534 * Calculates the effective address of a ModR/M memory operand.
13535 *
13536 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13537 *
13538 * May longjmp on internal error.
13539 *
13540 * @return The effective address.
13541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13542 * @param bRm The ModRM byte.
13543 * @param cbImm The size of any immediate following the
13544 * effective address opcode bytes. Important for
13545 * RIP relative addressing.
13546 */
13547IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13548{
13549 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13550# define SET_SS_DEF() \
13551 do \
13552 { \
13553 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13554 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13555 } while (0)
13556
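    /*
     * Unlike the VBOXSTRICTRC variants above, this helper returns the address
     * directly; impossible decoder states either longjmp with an IPE status
     * (see the AssertFailedStmt cases) or bail out with RTGCPTR_MAX via
     * IEM_NOT_REACHED_DEFAULT_CASE_RET2.
     */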
13557 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13558 {
13559/** @todo Check the effective address size crap! */
13560 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13561 {
13562 uint16_t u16EffAddr;
13563
13564 /* Handle the disp16 form with no registers first. */
13565 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13566 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13567 else
13568 {
13569                /* Get the displacement. */
13570 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13571 {
13572 case 0: u16EffAddr = 0; break;
13573 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13574 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13575 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13576 }
13577
13578 /* Add the base and index registers to the disp. */
13579 switch (bRm & X86_MODRM_RM_MASK)
13580 {
13581 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13582 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13583 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13584 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13585 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13586 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13587 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13588 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13589 }
13590 }
13591
13592 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13593 return u16EffAddr;
13594 }
13595
13596 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13597 uint32_t u32EffAddr;
13598
13599 /* Handle the disp32 form with no registers first. */
13600 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13601 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13602 else
13603 {
13604 /* Get the register (or SIB) value. */
13605 switch ((bRm & X86_MODRM_RM_MASK))
13606 {
13607 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13608 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13609 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13610 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13611 case 4: /* SIB */
13612 {
13613 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13614
13615 /* Get the index and scale it. */
13616 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13617 {
13618 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13619 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13620 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13621 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13622 case 4: u32EffAddr = 0; /*none */ break;
13623 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13624 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13625 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13626 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13627 }
13628 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13629
13630 /* add base */
13631 switch (bSib & X86_SIB_BASE_MASK)
13632 {
13633 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13634 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13635 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13636 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13637 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13638 case 5:
13639 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13640 {
13641 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13642 SET_SS_DEF();
13643 }
13644 else
13645 {
13646 uint32_t u32Disp;
13647 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13648 u32EffAddr += u32Disp;
13649 }
13650 break;
13651 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13652 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13653 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13654 }
13655 break;
13656 }
13657 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13658 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13659 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13660 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13661 }
13662
13663 /* Get and add the displacement. */
13664 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13665 {
13666 case 0:
13667 break;
13668 case 1:
13669 {
13670 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13671 u32EffAddr += i8Disp;
13672 break;
13673 }
13674 case 2:
13675 {
13676 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13677 u32EffAddr += u32Disp;
13678 break;
13679 }
13680 default:
13681 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13682 }
13683 }
13684
13685 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13686 {
13687 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13688 return u32EffAddr;
13689 }
13690 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13691 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13692 return u32EffAddr & UINT16_MAX;
13693 }
13694
13695 uint64_t u64EffAddr;
13696
13697 /* Handle the rip+disp32 form with no registers first. */
13698 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13699 {
13700 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13701 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13702 }
13703 else
13704 {
13705 /* Get the register (or SIB) value. */
13706 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13707 {
13708 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13709 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13710 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13711 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13712 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13713 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13714 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13715 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13716 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13717 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13718 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13719 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13720 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13721 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13722 /* SIB */
13723 case 4:
13724 case 12:
13725 {
13726 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13727
13728 /* Get the index and scale it. */
13729 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13730 {
13731 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13732 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13733 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13734 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13735 case 4: u64EffAddr = 0; /*none */ break;
13736 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13737 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13738 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13739 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13740 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13741 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13742 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13743 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13744 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13745 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13746 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13747 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13748 }
13749 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13750
13751 /* add base */
13752 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13753 {
13754 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13755 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13756 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13757 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13758 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13759 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13760 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13761 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13762 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13763 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13764 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13765 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13766 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13767 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13768 /* complicated encodings */
13769 case 5:
13770 case 13:
13771 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13772 {
13773 if (!pVCpu->iem.s.uRexB)
13774 {
13775 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13776 SET_SS_DEF();
13777 }
13778 else
13779 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13780 }
13781 else
13782 {
13783 uint32_t u32Disp;
13784 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13785 u64EffAddr += (int32_t)u32Disp;
13786 }
13787 break;
13788 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13789 }
13790 break;
13791 }
13792 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13793 }
13794
13795 /* Get and add the displacement. */
13796 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13797 {
13798 case 0:
13799 break;
13800 case 1:
13801 {
13802 int8_t i8Disp;
13803 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13804 u64EffAddr += i8Disp;
13805 break;
13806 }
13807 case 2:
13808 {
13809 uint32_t u32Disp;
13810 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13811 u64EffAddr += (int32_t)u32Disp;
13812 break;
13813 }
13814 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13815 }
13816
13817 }
13818
13819 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13820 {
13821 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13822 return u64EffAddr;
13823 }
13824 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13825 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13826 return u64EffAddr & UINT32_MAX;
13827}
13828#endif /* IEM_WITH_SETJMP */
13829
13830/** @} */
13831
13832
13833
13834/*
13835 * Include the instructions
13836 */
13837#include "IEMAllInstructions.cpp.h"
13838
13839
13840
13841#ifdef LOG_ENABLED
13842/**
13843 * Logs the current instruction.
13844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13845 * @param fSameCtx Set if we have the same context information as the VMM,
13846 * clear if we may have already executed an instruction in
13847 * our debug context. When clear, we assume IEMCPU holds
13848 * valid CPU mode info. Note that this parameter is nowadays
13849 * misleading and obsolete.
13850 *
13851 * @param pszFunction The IEM function doing the execution.
13852 */
13853IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13854{
13855# ifdef IN_RING3
13856 if (LogIs2Enabled())
13857 {
13858 char szInstr[256];
13859 uint32_t cbInstr = 0;
13860 if (fSameCtx)
13861 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13862 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13863 szInstr, sizeof(szInstr), &cbInstr);
13864 else
13865 {
13866 uint32_t fFlags = 0;
13867 switch (pVCpu->iem.s.enmCpuMode)
13868 {
13869 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13870 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13871 case IEMMODE_16BIT:
13872 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13873 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13874 else
13875 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13876 break;
13877 }
13878 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13879 szInstr, sizeof(szInstr), &cbInstr);
13880 }
13881
13882 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13883 Log2(("**** %s\n"
13884 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13885 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13886 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13887 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13888 " %s\n"
13889 , pszFunction,
13890 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13891 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13893 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13894 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13895 szInstr));
13896
13897 if (LogIs3Enabled())
13898 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13899 }
13900 else
13901# endif
13902 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13903 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13904 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13905}
13906#endif /* LOG_ENABLED */
13907
13908
13909/**
13910 * Makes status code adjustments (pass up from I/O and access handlers)
13911 * and maintains statistics.
13912 *
13913 * @returns Strict VBox status code to pass up.
13914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13915 * @param rcStrict The status from executing an instruction.
13916 */
13917DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13918{
13919 if (rcStrict != VINF_SUCCESS)
13920 {
13921 if (RT_SUCCESS(rcStrict))
13922 {
13923 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13924 || rcStrict == VINF_IOM_R3_IOPORT_READ
13925 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13926 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13927 || rcStrict == VINF_IOM_R3_MMIO_READ
13928 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13929 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13930 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13931 || rcStrict == VINF_CPUM_R3_MSR_READ
13932 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13933 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13934 || rcStrict == VINF_EM_RAW_TO_R3
13935 || rcStrict == VINF_EM_TRIPLE_FAULT
13936 || rcStrict == VINF_GIM_R3_HYPERCALL
13937 /* raw-mode / virt handlers only: */
13938 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13939 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13940 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13941 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13942 || rcStrict == VINF_SELM_SYNC_GDT
13943 || rcStrict == VINF_CSAM_PENDING_ACTION
13944 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13945 /* nested hw.virt codes: */
13946 || rcStrict == VINF_VMX_VMEXIT
13947 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13948 || rcStrict == VINF_SVM_VMEXIT
13949 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13950/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
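            /* Pick the more important of the two codes: a recorded rcPassUp outside
               the VINF_EM_FIRST..VINF_EM_LAST range, or one with a lower (i.e. higher
               priority) value than rcStrict, takes precedence below. */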
13951 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13952#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13953 if ( rcStrict == VINF_VMX_VMEXIT
13954 && rcPassUp == VINF_SUCCESS)
13955 rcStrict = VINF_SUCCESS;
13956 else
13957#endif
13958#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13959 if ( rcStrict == VINF_SVM_VMEXIT
13960 && rcPassUp == VINF_SUCCESS)
13961 rcStrict = VINF_SUCCESS;
13962 else
13963#endif
13964 if (rcPassUp == VINF_SUCCESS)
13965 pVCpu->iem.s.cRetInfStatuses++;
13966 else if ( rcPassUp < VINF_EM_FIRST
13967 || rcPassUp > VINF_EM_LAST
13968 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13969 {
13970 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13971 pVCpu->iem.s.cRetPassUpStatus++;
13972 rcStrict = rcPassUp;
13973 }
13974 else
13975 {
13976 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13977 pVCpu->iem.s.cRetInfStatuses++;
13978 }
13979 }
13980 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13981 pVCpu->iem.s.cRetAspectNotImplemented++;
13982 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13983 pVCpu->iem.s.cRetInstrNotImplemented++;
13984 else
13985 pVCpu->iem.s.cRetErrStatuses++;
13986 }
13987 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13988 {
13989 pVCpu->iem.s.cRetPassUpStatus++;
13990 rcStrict = pVCpu->iem.s.rcPassUp;
13991 }
13992
13993 return rcStrict;
13994}
13995
13996
13997/**
13998 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13999 * IEMExecOneWithPrefetchedByPC.
14000 *
14001 * Similar code is found in IEMExecLots.
14002 *
14003 * @return Strict VBox status code.
14004 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14005 * @param fExecuteInhibit If set, execute the instruction following CLI,
14006 * POP SS and MOV SS,GR.
14007 * @param pszFunction The calling function name.
14008 */
14009DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14010{
14011 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14012 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14013 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14014 RT_NOREF_PV(pszFunction);
14015
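    /*
     * With IEM_WITH_SETJMP the decoder and memory helpers report failures by
     * longjmp'ing to the buffer installed here rather than returning status
     * codes; the longjmp value then ends up as rcStrict.
     */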
14016#ifdef IEM_WITH_SETJMP
14017 VBOXSTRICTRC rcStrict;
14018 jmp_buf JmpBuf;
14019 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14020 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14021 if ((rcStrict = setjmp(JmpBuf)) == 0)
14022 {
14023 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14024 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14025 }
14026 else
14027 pVCpu->iem.s.cLongJumps++;
14028 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14029#else
14030 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14031 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14032#endif
14033 if (rcStrict == VINF_SUCCESS)
14034 pVCpu->iem.s.cInstructions++;
14035 if (pVCpu->iem.s.cActiveMappings > 0)
14036 {
14037 Assert(rcStrict != VINF_SUCCESS);
14038 iemMemRollback(pVCpu);
14039 }
14040 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14041 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14042 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14043
14044//#ifdef DEBUG
14045// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14046//#endif
14047
14048#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14049 /*
14050 * Perform any VMX nested-guest instruction boundary actions.
14051 *
14052     * If any of these causes a VM-exit, we must skip executing the next
14053     * instruction (we would run into stale page tables). A VM-exit clears
14054     * any interrupt-inhibition, so that should ensure we don't go on to
14055     * try executing the next instruction. Clearing fExecuteInhibit is
14056     * problematic because of the setjmp/longjmp clobbering above.
14057 */
14058 if ( rcStrict == VINF_SUCCESS
14059 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14060 {
14061 bool fCheckRemainingIntercepts = true;
14062 /* TPR-below threshold/APIC write has the highest priority. */
14063 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14064 {
14065 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14066 fCheckRemainingIntercepts = false;
14067 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14068 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14069 }
14070 /* MTF takes priority over VMX-preemption timer. */
14071 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14072 {
14073 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
14074 fCheckRemainingIntercepts = false;
14075 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14076 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14077 }
14078 /* VMX preemption timer takes priority over NMI-window exits. */
14079 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14080 {
14081 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14082 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14083 rcStrict = VINF_SUCCESS;
14084 else
14085 {
14086 fCheckRemainingIntercepts = false;
14087 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14088 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14089 }
14090 }
14091
14092 /*
14093 * Check remaining intercepts.
14094 *
14095 * NMI-window and Interrupt-window VM-exits.
14096 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
14097 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
14098 *
14099 * See Intel spec. 26.7.6 "NMI-Window Exiting".
14100 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
14101 */
14102 if ( fCheckRemainingIntercepts
14103 && !TRPMHasTrap(pVCpu)
14104 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
14105 {
14106 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
14107 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
14108 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
14109 {
14110 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
14111 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14112 }
14113 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
14114 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
14115 {
14116 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
14117 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
14118 }
14119 }
14120 }
14121#endif
14122
14123 /* Execute the next instruction as well if a cli, pop ss or
14124 mov ss, Gr has just completed successfully. */
14125 if ( fExecuteInhibit
14126 && rcStrict == VINF_SUCCESS
14127 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14128 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14129 {
14130 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14131 if (rcStrict == VINF_SUCCESS)
14132 {
14133#ifdef LOG_ENABLED
14134 iemLogCurInstr(pVCpu, false, pszFunction);
14135#endif
14136#ifdef IEM_WITH_SETJMP
14137 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14138 if ((rcStrict = setjmp(JmpBuf)) == 0)
14139 {
14140 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14141 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14142 }
14143 else
14144 pVCpu->iem.s.cLongJumps++;
14145 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14146#else
14147 IEM_OPCODE_GET_NEXT_U8(&b);
14148 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14149#endif
14150 if (rcStrict == VINF_SUCCESS)
14151 pVCpu->iem.s.cInstructions++;
14152 if (pVCpu->iem.s.cActiveMappings > 0)
14153 {
14154 Assert(rcStrict != VINF_SUCCESS);
14155 iemMemRollback(pVCpu);
14156 }
14157 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14158 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14159 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14160 }
14161 else if (pVCpu->iem.s.cActiveMappings > 0)
14162 iemMemRollback(pVCpu);
14163 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14164 }
14165
14166 /*
14167 * Return value fiddling, statistics and sanity assertions.
14168 */
14169 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14170
14171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14173 return rcStrict;
14174}
14175
14176
14177#ifdef IN_RC
14178/**
14179 * Re-enters raw-mode or ensures we return to ring-3.
14180 *
14181 * @returns rcStrict, maybe modified.
14182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14183 * @param   rcStrict    The status code returned by the interpreter.
14184 */
14185DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14186{
14187 if ( !pVCpu->iem.s.fInPatchCode
14188 && ( rcStrict == VINF_SUCCESS
14189 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14190 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14191 {
14192 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14193 CPUMRawEnter(pVCpu);
14194 else
14195 {
14196 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14197 rcStrict = VINF_EM_RESCHEDULE;
14198 }
14199 }
14200 return rcStrict;
14201}
14202#endif
14203
14204
14205/**
14206 * Execute one instruction.
14207 *
14208 * @return Strict VBox status code.
14209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14210 */
14211VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14212{
14213#ifdef LOG_ENABLED
14214 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14215#endif
14216
14217 /*
14218 * Do the decoding and emulation.
14219 */
14220 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14221 if (rcStrict == VINF_SUCCESS)
14222 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14223 else if (pVCpu->iem.s.cActiveMappings > 0)
14224 iemMemRollback(pVCpu);
14225
14226#ifdef IN_RC
14227 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14228#endif
14229 if (rcStrict != VINF_SUCCESS)
14230 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14231 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14232 return rcStrict;
14233}
14234
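/*
 * A minimal, hypothetical caller-side sketch for IEMExecOne above:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // already run through iemExecStatusCodeFiddling().
 */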
14235
14236VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14237{
14238 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14239
14240 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14241 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14242 if (rcStrict == VINF_SUCCESS)
14243 {
14244 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14245 if (pcbWritten)
14246 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14247 }
14248 else if (pVCpu->iem.s.cActiveMappings > 0)
14249 iemMemRollback(pVCpu);
14250
14251#ifdef IN_RC
14252 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14253#endif
14254 return rcStrict;
14255}
14256
14257
14258VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14259 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14260{
14261 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14262
14263 VBOXSTRICTRC rcStrict;
14264 if ( cbOpcodeBytes
14265 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14266 {
14267 iemInitDecoder(pVCpu, false);
14268#ifdef IEM_WITH_CODE_TLB
14269 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14270 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14271 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14272 pVCpu->iem.s.offCurInstrStart = 0;
14273 pVCpu->iem.s.offInstrNextByte = 0;
14274#else
14275 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14276 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14277#endif
14278 rcStrict = VINF_SUCCESS;
14279 }
14280 else
14281 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14282 if (rcStrict == VINF_SUCCESS)
14283 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14284 else if (pVCpu->iem.s.cActiveMappings > 0)
14285 iemMemRollback(pVCpu);
14286
14287#ifdef IN_RC
14288 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14289#endif
14290 return rcStrict;
14291}
14292
14293
14294VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14295{
14296 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14297
14298 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14299 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14300 if (rcStrict == VINF_SUCCESS)
14301 {
14302 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14303 if (pcbWritten)
14304 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14305 }
14306 else if (pVCpu->iem.s.cActiveMappings > 0)
14307 iemMemRollback(pVCpu);
14308
14309#ifdef IN_RC
14310 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14311#endif
14312 return rcStrict;
14313}
14314
14315
14316VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14317 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14318{
14319 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14320
14321 VBOXSTRICTRC rcStrict;
14322 if ( cbOpcodeBytes
14323 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14324 {
14325 iemInitDecoder(pVCpu, true);
14326#ifdef IEM_WITH_CODE_TLB
14327 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14328 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14329 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14330 pVCpu->iem.s.offCurInstrStart = 0;
14331 pVCpu->iem.s.offInstrNextByte = 0;
14332#else
14333 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14334 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14335#endif
14336 rcStrict = VINF_SUCCESS;
14337 }
14338 else
14339 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14340 if (rcStrict == VINF_SUCCESS)
14341 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14342 else if (pVCpu->iem.s.cActiveMappings > 0)
14343 iemMemRollback(pVCpu);
14344
14345#ifdef IN_RC
14346 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14347#endif
14348 return rcStrict;
14349}
14350
14351
14352/**
14353 * This is for debugging DISGetParamSize and may come in handy.
14354 *
14355 * @returns Strict VBox status code.
14356 * @param pVCpu The cross context virtual CPU structure of the
14357 * calling EMT.
14358 * @param pCtxCore The context core structure.
14359 * @param OpcodeBytesPC The PC of the opcode bytes.
14360 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14361 * @param cbOpcodeBytes Number of prefetched bytes.
14362 * @param pcbWritten Where to return the number of bytes written.
14363 * Optional.
14364 */
14365VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14366 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14367 uint32_t *pcbWritten)
14368{
14369 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14370
14371 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14372 VBOXSTRICTRC rcStrict;
14373 if ( cbOpcodeBytes
14374 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14375 {
14376 iemInitDecoder(pVCpu, true);
14377#ifdef IEM_WITH_CODE_TLB
14378 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14379 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14380 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14381 pVCpu->iem.s.offCurInstrStart = 0;
14382 pVCpu->iem.s.offInstrNextByte = 0;
14383#else
14384 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14385 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14386#endif
14387 rcStrict = VINF_SUCCESS;
14388 }
14389 else
14390 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14391 if (rcStrict == VINF_SUCCESS)
14392 {
14393 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14394 if (pcbWritten)
14395 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14396 }
14397 else if (pVCpu->iem.s.cActiveMappings > 0)
14398 iemMemRollback(pVCpu);
14399
14400#ifdef IN_RC
14401 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14402#endif
14403 return rcStrict;
14404}
14405
14406
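/**
 * Executes instructions in a loop, stopping when force flags or timers need
 * servicing, an error occurs, or the given instruction limit is reached.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions    Maximum number of instructions to execute.
 * @param   cPollRate           Timer polling mask; cPollRate + 1 must be a power of two.
 * @param   pcInstructions      Where to return the number of instructions executed.  Optional.
 */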
14407VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14408{
14409 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14410 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14411
14412 /*
14413 * See if there is an interrupt pending in TRPM, inject it if we can.
14414 */
14415 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14416#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14417 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14418 if (fIntrEnabled)
14419 {
14420 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14421 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14422 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14423 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14424 else
14425 {
14426 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14427 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14428 }
14429 }
14430#else
14431 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14432#endif
14433
14434 /** @todo What if we are injecting an exception and not an interrupt? Is that
14435 * possible here? */
14436 if ( fIntrEnabled
14437 && TRPMHasTrap(pVCpu)
14438 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14439 {
14440 uint8_t u8TrapNo;
14441 TRPMEVENT enmType;
14442 RTGCUINT uErrCode;
14443 RTGCPTR uCr2;
14444 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14445 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14446 TRPMResetTrap(pVCpu);
14447#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14448 /* Injecting an event may cause a VM-exit. */
14449 if ( rcStrict != VINF_SUCCESS
14450 && rcStrict != VINF_IEM_RAISED_XCPT)
14451 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14452#else
14453 NOREF(rcStrict);
14454#endif
14455 }
14456
14457 /*
14458 * Initial decoder init w/ prefetch, then setup setjmp.
14459 */
14460 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14461 if (rcStrict == VINF_SUCCESS)
14462 {
14463#ifdef IEM_WITH_SETJMP
14464 jmp_buf JmpBuf;
14465 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14466 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14467 pVCpu->iem.s.cActiveMappings = 0;
14468 if ((rcStrict = setjmp(JmpBuf)) == 0)
14469#endif
14470 {
14471 /*
14472             * The run loop.  We limit ourselves to the instruction count specified by the caller.
14473 */
14474 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14475 PVM pVM = pVCpu->CTX_SUFF(pVM);
14476 for (;;)
14477 {
14478 /*
14479 * Log the state.
14480 */
14481#ifdef LOG_ENABLED
14482 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14483#endif
14484
14485 /*
14486 * Do the decoding and emulation.
14487 */
14488 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14489 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14490 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14491 {
14492 Assert(pVCpu->iem.s.cActiveMappings == 0);
14493 pVCpu->iem.s.cInstructions++;
14494 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14495 {
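                    /* Only leave the loop for force flags that the outer loops must
                       service; the ones masked out here don't require us to stop. */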
14496 uint64_t fCpu = pVCpu->fLocalForcedActions
14497 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14498 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14499 | VMCPU_FF_TLB_FLUSH
14500 | VMCPU_FF_INHIBIT_INTERRUPTS
14501 | VMCPU_FF_BLOCK_NMIS
14502 | VMCPU_FF_UNHALT ));
14503
14504 if (RT_LIKELY( ( !fCpu
14505 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14506 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14507 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14508 {
14509 if (cMaxInstructionsGccStupidity-- > 0)
14510 {
14511                            /* Poll timers every now and then according to the caller's specs. */
14512 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14513 || !TMTimerPollBool(pVM, pVCpu))
14514 {
14515 Assert(pVCpu->iem.s.cActiveMappings == 0);
14516 iemReInitDecoder(pVCpu);
14517 continue;
14518 }
14519 }
14520 }
14521 }
14522 Assert(pVCpu->iem.s.cActiveMappings == 0);
14523 }
14524 else if (pVCpu->iem.s.cActiveMappings > 0)
14525 iemMemRollback(pVCpu);
14526 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14527 break;
14528 }
14529 }
14530#ifdef IEM_WITH_SETJMP
14531 else
14532 {
14533 if (pVCpu->iem.s.cActiveMappings > 0)
14534 iemMemRollback(pVCpu);
14535# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14536 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14537# endif
14538 pVCpu->iem.s.cLongJumps++;
14539 }
14540 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14541#endif
14542
14543 /*
14544 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14545 */
14546 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14547 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14548 }
14549 else
14550 {
14551 if (pVCpu->iem.s.cActiveMappings > 0)
14552 iemMemRollback(pVCpu);
14553
14554#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14555 /*
14556 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14557 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14558 */
14559 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14560#endif
14561 }
14562
14563 /*
14564 * Maybe re-enter raw-mode and log.
14565 */
14566#ifdef IN_RC
14567 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14568#endif
14569 if (rcStrict != VINF_SUCCESS)
14570 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14571 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14572 if (pcInstructions)
14573 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14574 return rcStrict;
14575}
14576
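/*
 * Hypothetical invocation sketch for IEMExecLots above: execute up to 4096
 * instructions, polling timers roughly every 512 instructions (cPollRate + 1
 * must be a power of two):
 *
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 */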
14577
14578/**
14579 * Interface used by EMExecuteExec; gathers exit statistics and enforces execution limits.
14580 *
14581 * @returns Strict VBox status code.
14582 * @param pVCpu The cross context virtual CPU structure.
14583 * @param fWillExit To be defined.
14584 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14585 * @param cMaxInstructions Maximum number of instructions to execute.
14586 * @param cMaxInstructionsWithoutExits
14587 * The max number of instructions without exits.
14588 * @param pStats Where to return statistics.
14589 */
14590VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14591 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14592{
14593    NOREF(fWillExit); /** @todo define flexible exit criteria */
14594
14595 /*
14596 * Initialize return stats.
14597 */
14598 pStats->cInstructions = 0;
14599 pStats->cExits = 0;
14600 pStats->cMaxExitDistance = 0;
14601 pStats->cReserved = 0;
14602
14603 /*
14604 * Initial decoder init w/ prefetch, then setup setjmp.
14605 */
14606 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14607 if (rcStrict == VINF_SUCCESS)
14608 {
14609#ifdef IEM_WITH_SETJMP
14610 jmp_buf JmpBuf;
14611 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14612 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14613 pVCpu->iem.s.cActiveMappings = 0;
14614 if ((rcStrict = setjmp(JmpBuf)) == 0)
14615#endif
14616 {
14617#ifdef IN_RING0
14618 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14619#endif
14620 uint32_t cInstructionSinceLastExit = 0;
14621
14622 /*
14623             * The run loop.  We limit ourselves to the instruction and exit counts specified by the caller.
14624 */
14625 PVM pVM = pVCpu->CTX_SUFF(pVM);
14626 for (;;)
14627 {
14628 /*
14629 * Log the state.
14630 */
14631#ifdef LOG_ENABLED
14632 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14633#endif
14634
14635 /*
14636 * Do the decoding and emulation.
14637 */
14638 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14639
14640 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14641 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14642
14643 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14644 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14645 {
14646 pStats->cExits += 1;
14647 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14648 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14649 cInstructionSinceLastExit = 0;
14650 }
14651
14652 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14653 {
14654 Assert(pVCpu->iem.s.cActiveMappings == 0);
14655 pVCpu->iem.s.cInstructions++;
14656 pStats->cInstructions++;
14657 cInstructionSinceLastExit++;
14658 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14659 {
14660 uint64_t fCpu = pVCpu->fLocalForcedActions
14661 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14662 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14663 | VMCPU_FF_TLB_FLUSH
14664 | VMCPU_FF_INHIBIT_INTERRUPTS
14665 | VMCPU_FF_BLOCK_NMIS
14666 | VMCPU_FF_UNHALT ));
14667
14668 if (RT_LIKELY( ( ( !fCpu
14669 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14670 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14671 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14672 || pStats->cInstructions < cMinInstructions))
14673 {
14674 if (pStats->cInstructions < cMaxInstructions)
14675 {
14676 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14677 {
14678#ifdef IN_RING0
14679 if ( !fCheckPreemptionPending
14680 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14681#endif
14682 {
14683 Assert(pVCpu->iem.s.cActiveMappings == 0);
14684 iemReInitDecoder(pVCpu);
14685 continue;
14686 }
14687#ifdef IN_RING0
14688 rcStrict = VINF_EM_RAW_INTERRUPT;
14689 break;
14690#endif
14691 }
14692 }
14693 }
14694 Assert(!(fCpu & VMCPU_FF_IEM));
14695 }
14696 Assert(pVCpu->iem.s.cActiveMappings == 0);
14697 }
14698 else if (pVCpu->iem.s.cActiveMappings > 0)
14699 iemMemRollback(pVCpu);
14700 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14701 break;
14702 }
14703 }
14704#ifdef IEM_WITH_SETJMP
14705 else
14706 {
14707 if (pVCpu->iem.s.cActiveMappings > 0)
14708 iemMemRollback(pVCpu);
14709 pVCpu->iem.s.cLongJumps++;
14710 }
14711 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14712#endif
14713
14714 /*
14715 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14716 */
14717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14719 }
14720 else
14721 {
14722 if (pVCpu->iem.s.cActiveMappings > 0)
14723 iemMemRollback(pVCpu);
14724
14725#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14726 /*
14727 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14728 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14729 */
14730 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14731#endif
14732 }
14733
14734 /*
14735 * Maybe re-enter raw-mode and log.
14736 */
14737#ifdef IN_RC
14738 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14739#endif
14740 if (rcStrict != VINF_SUCCESS)
14741 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14743 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14744 return rcStrict;
14745}
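
/*
 * Illustrative usage sketch (not taken from the real callers): how an exit-history
 * loop might drive IEMExecForExits and consume the returned statistics.  The helper
 * name, the instruction budgets and the logging are hypothetical; only the
 * IEMExecForExits signature and the IEMEXECFOREXITSTATS fields come from the code above.
 */
#if 0
static VBOXSTRICTRC exampleRunGuestViaIemForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not yet defined*/,
                                            8 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("exampleRunGuestViaIemForExits: rc=%Rrc ins=%u exits=%u maxdist=%u\n",
             VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif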
14746
14747
14748/**
14749 * Injects a trap, fault, abort, software interrupt or external interrupt.
14750 *
14751 * The parameter list matches TRPMQueryTrapAll pretty closely.
14752 *
14753 * @returns Strict VBox status code.
14754 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14755 * @param u8TrapNo The trap number.
14756 * @param enmType What type is it (trap/fault/abort), software
14757 * interrupt or hardware interrupt.
14758 * @param uErrCode The error code if applicable.
14759 * @param uCr2 The CR2 value if applicable.
14760 * @param cbInstr The instruction length (only relevant for
14761 * software interrupts).
14762 */
14763VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14764 uint8_t cbInstr)
14765{
14766 iemInitDecoder(pVCpu, false);
14767#ifdef DBGFTRACE_ENABLED
14768 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14769 u8TrapNo, enmType, uErrCode, uCr2);
14770#endif
14771
14772 uint32_t fFlags;
14773 switch (enmType)
14774 {
14775 case TRPM_HARDWARE_INT:
14776 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14777 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14778 uErrCode = uCr2 = 0;
14779 break;
14780
14781 case TRPM_SOFTWARE_INT:
14782 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14783 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14784 uErrCode = uCr2 = 0;
14785 break;
14786
14787 case TRPM_TRAP:
14788 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14789 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14790 if (u8TrapNo == X86_XCPT_PF)
14791 fFlags |= IEM_XCPT_FLAGS_CR2;
14792 switch (u8TrapNo)
14793 {
14794 case X86_XCPT_DF:
14795 case X86_XCPT_TS:
14796 case X86_XCPT_NP:
14797 case X86_XCPT_SS:
14798 case X86_XCPT_PF:
14799 case X86_XCPT_AC:
14800 fFlags |= IEM_XCPT_FLAGS_ERR;
14801 break;
14802 }
14803 break;
14804
14805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14806 }
14807
14808 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14809
14810 if (pVCpu->iem.s.cActiveMappings > 0)
14811 iemMemRollback(pVCpu);
14812
14813 return rcStrict;
14814}
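
/*
 * Illustrative sketch (hypothetical caller): injecting a page fault with the same
 * parameter shape TRPM would supply.  The fault address and error code are example
 * inputs; only the IEMInjectTrap signature is taken from the code above.
 */
#if 0
static VBOXSTRICTRC exampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* TRPM_TRAP + X86_XCPT_PF: IEMInjectTrap adds IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 itself. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr - n/a for faults*/);
}
#endif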
14815
14816
14817/**
14818 * Injects the active TRPM event.
14819 *
14820 * @returns Strict VBox status code.
14821 * @param pVCpu The cross context virtual CPU structure.
14822 */
14823VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14824{
14825#ifndef IEM_IMPLEMENTS_TASKSWITCH
14826 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14827#else
14828 uint8_t u8TrapNo;
14829 TRPMEVENT enmType;
14830 RTGCUINT uErrCode;
14831 RTGCUINTPTR uCr2;
14832 uint8_t cbInstr;
14833 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14834 if (RT_FAILURE(rc))
14835 return rc;
14836
14837 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14838#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14839 if (rcStrict == VINF_SVM_VMEXIT)
14840 rcStrict = VINF_SUCCESS;
14841#endif
14842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14843 if (rcStrict == VINF_VMX_VMEXIT)
14844 rcStrict = VINF_SUCCESS;
14845#endif
14846 /** @todo Are there any other codes that imply the event was successfully
14847 * delivered to the guest? See @bugref{6607}. */
14848 if ( rcStrict == VINF_SUCCESS
14849 || rcStrict == VINF_IEM_RAISED_XCPT)
14850 TRPMResetTrap(pVCpu);
14851
14852 return rcStrict;
14853#endif
14854}
14855
14856
14857VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14858{
14859 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14860 return VERR_NOT_IMPLEMENTED;
14861}
14862
14863
14864VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14865{
14866 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14867 return VERR_NOT_IMPLEMENTED;
14868}
14869
14870
14871#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14872/**
14873 * Executes an IRET instruction with default operand size.
14874 *
14875 * This is for PATM.
14876 *
14877 * @returns VBox status code.
14878 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14879 * @param pCtxCore The register frame.
14880 */
14881VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14882{
14883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14884
14885 iemCtxCoreToCtx(pCtx, pCtxCore);
14886 iemInitDecoder(pVCpu);
14887 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14888 if (rcStrict == VINF_SUCCESS)
14889 iemCtxToCtxCore(pCtxCore, pCtx);
14890 else
14891 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14892 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14893 return rcStrict;
14894}
14895#endif
14896
14897
14898/**
14899 * Macro used by the IEMExec* method to check the given instruction length.
14900 *
14901 * Will return on failure!
14902 *
14903 * @param a_cbInstr The given instruction length.
14904 * @param a_cbMin The minimum length.
14905 */
14906#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14907 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14908 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14909
14910
14911/**
14912 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14913 *
14914 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14915 *
14916 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14918 * @param rcStrict The status code to fiddle.
14919 */
14920DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14921{
14922 iemUninitExec(pVCpu);
14923#ifdef IN_RC
14924 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14925#else
14926 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14927#endif
14928}
14929
14930
14931/**
14932 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14933 *
14934 * This API ASSUMES that the caller has already verified that the guest code is
14935 * allowed to access the I/O port. (The I/O port is in the DX register in the
14936 * guest state.)
14937 *
14938 * @returns Strict VBox status code.
14939 * @param pVCpu The cross context virtual CPU structure.
14940 * @param cbValue The size of the I/O port access (1, 2, or 4).
14941 * @param enmAddrMode The addressing mode.
14942 * @param fRepPrefix Indicates whether a repeat prefix is used
14943 * (doesn't matter which for this instruction).
14944 * @param cbInstr The instruction length in bytes.
14945 * @param cbInstr The instruction length in bytes.
14946 * @param iEffSeg The effective segment register number.
14946 * @param fIoChecked Whether the access to the I/O port has been
14947 * checked or not. It's typically checked in the
14948 * HM scenario.
14949 */
14950VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14951 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14952{
14953 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14954 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14955
14956 /*
14957 * State init.
14958 */
14959 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14960
14961 /*
14962 * Switch orgy for getting to the right handler.
14963 */
14964 VBOXSTRICTRC rcStrict;
14965 if (fRepPrefix)
14966 {
14967 switch (enmAddrMode)
14968 {
14969 case IEMMODE_16BIT:
14970 switch (cbValue)
14971 {
14972 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14973 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14975 default:
14976 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14977 }
14978 break;
14979
14980 case IEMMODE_32BIT:
14981 switch (cbValue)
14982 {
14983 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14984 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14985 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14986 default:
14987 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14988 }
14989 break;
14990
14991 case IEMMODE_64BIT:
14992 switch (cbValue)
14993 {
14994 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14995 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14996 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14997 default:
14998 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14999 }
15000 break;
15001
15002 default:
15003 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15004 }
15005 }
15006 else
15007 {
15008 switch (enmAddrMode)
15009 {
15010 case IEMMODE_16BIT:
15011 switch (cbValue)
15012 {
15013 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15014 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15015 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15016 default:
15017 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15018 }
15019 break;
15020
15021 case IEMMODE_32BIT:
15022 switch (cbValue)
15023 {
15024 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15025 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15026 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15027 default:
15028 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15029 }
15030 break;
15031
15032 case IEMMODE_64BIT:
15033 switch (cbValue)
15034 {
15035 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15036 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15037 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15038 default:
15039 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15040 }
15041 break;
15042
15043 default:
15044 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15045 }
15046 }
15047
15048 if (pVCpu->iem.s.cActiveMappings)
15049 iemMemRollback(pVCpu);
15050
15051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15052}
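
/*
 * Illustrative sketch (hypothetical exit handler): forwarding a decoded "rep outsb"
 * to IEM.  The addressing mode, effective segment and instruction length would
 * normally come from the VM-exit / #VMEXIT decoding; the values below are example
 * inputs only.
 */
#if 0
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif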
15053
15054
15055/**
15056 * Interface for HM and EM for executing string I/O IN (read) instructions.
15057 *
15058 * This API ASSUMES that the caller has already verified that the guest code is
15059 * allowed to access the I/O port. (The I/O port is in the DX register in the
15060 * guest state.)
15061 *
15062 * @returns Strict VBox status code.
15063 * @param pVCpu The cross context virtual CPU structure.
15064 * @param cbValue The size of the I/O port access (1, 2, or 4).
15065 * @param enmAddrMode The addressing mode.
15066 * @param fRepPrefix Indicates whether a repeat prefix is used
15067 * (doesn't matter which for this instruction).
15068 * @param cbInstr The instruction length in bytes.
15069 * @param fIoChecked Whether the access to the I/O port has been
15070 * checked or not. It's typically checked in the
15071 * HM scenario.
15072 */
15073VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15074 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15077
15078 /*
15079 * State init.
15080 */
15081 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15082
15083 /*
15084 * Switch orgy for getting to the right handler.
15085 */
15086 VBOXSTRICTRC rcStrict;
15087 if (fRepPrefix)
15088 {
15089 switch (enmAddrMode)
15090 {
15091 case IEMMODE_16BIT:
15092 switch (cbValue)
15093 {
15094 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15095 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15096 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15097 default:
15098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15099 }
15100 break;
15101
15102 case IEMMODE_32BIT:
15103 switch (cbValue)
15104 {
15105 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15106 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15107 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15108 default:
15109 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15110 }
15111 break;
15112
15113 case IEMMODE_64BIT:
15114 switch (cbValue)
15115 {
15116 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15117 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15118 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15119 default:
15120 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15121 }
15122 break;
15123
15124 default:
15125 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15126 }
15127 }
15128 else
15129 {
15130 switch (enmAddrMode)
15131 {
15132 case IEMMODE_16BIT:
15133 switch (cbValue)
15134 {
15135 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15136 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15137 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15138 default:
15139 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15140 }
15141 break;
15142
15143 case IEMMODE_32BIT:
15144 switch (cbValue)
15145 {
15146 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15147 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15148 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15149 default:
15150 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15151 }
15152 break;
15153
15154 case IEMMODE_64BIT:
15155 switch (cbValue)
15156 {
15157 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15158 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15159 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15160 default:
15161 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15162 }
15163 break;
15164
15165 default:
15166 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15167 }
15168 }
15169
15170 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15171 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15172}
15173
15174
15175/**
15176 * Interface for rawmode to execute an OUT instruction.
15177 *
15178 * @returns Strict VBox status code.
15179 * @param pVCpu The cross context virtual CPU structure.
15180 * @param cbInstr The instruction length in bytes.
15181 * @param u16Port The port to write to.
15182 * @param fImm Whether the port is specified using an immediate operand or
15183 * using the implicit DX register.
15184 * @param cbReg The register size.
15185 *
15186 * @remarks In ring-0 not all of the state needs to be synced in.
15187 */
15188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15189{
15190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15191 Assert(cbReg <= 4 && cbReg != 3);
15192
15193 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15194 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15195 Assert(!pVCpu->iem.s.cActiveMappings);
15196 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15197}
15198
15199
15200/**
15201 * Interface for rawmode to execute an IN instruction.
15202 *
15203 * @returns Strict VBox status code.
15204 * @param pVCpu The cross context virtual CPU structure.
15205 * @param cbInstr The instruction length in bytes.
15206 * @param u16Port The port to read.
15207 * @param fImm Whether the port is specified using an immediate operand or
15208 * using the implicit DX.
15209 * @param cbReg The register size.
15210 */
15211VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15212{
15213 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15214 Assert(cbReg <= 4 && cbReg != 3);
15215
15216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15218 Assert(!pVCpu->iem.s.cActiveMappings);
15219 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15220}
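
/*
 * Illustrative sketch (hypothetical caller): emulating already decoded "out dx, al"
 * and "in eax, dx" forms with the two interfaces above.  The caller is assumed to
 * pass the port value taken from DX; the one-byte instruction lengths match the
 * plain EE/EC encodings without prefixes.
 */
#if 0
static VBOXSTRICTRC exampleEmulateDecodedPortIo(PVMCPU pVCpu, uint16_t u16Port)
{
    /* "out dx, al": one opcode byte, port given via DX (fImm=false), 1 byte operand. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* "in eax, dx": likewise one opcode byte, 4 byte operand. */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 4 /*cbReg*/);
}
#endif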
15221
15222
15223/**
15224 * Interface for HM and EM to write to a CRx register.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure.
15228 * @param cbInstr The instruction length in bytes.
15229 * @param iCrReg The control register number (destination).
15230 * @param iGReg The general purpose register number (source).
15231 *
15232 * @remarks In ring-0 not all of the state needs to be synced in.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15237 Assert(iCrReg < 16);
15238 Assert(iGReg < 16);
15239
15240 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15241 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15242 Assert(!pVCpu->iem.s.cActiveMappings);
15243 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15244}
15245
15246
15247/**
15248 * Interface for HM and EM to read from a CRx register.
15249 *
15250 * @returns Strict VBox status code.
15251 * @param pVCpu The cross context virtual CPU structure.
15252 * @param cbInstr The instruction length in bytes.
15253 * @param iGReg The general purpose register number (destination).
15254 * @param iCrReg The control register number (source).
15255 *
15256 * @remarks In ring-0 not all of the state needs to be synced in.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15262 | CPUMCTX_EXTRN_APIC_TPR);
15263 Assert(iCrReg < 16);
15264 Assert(iGReg < 16);
15265
15266 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15268 Assert(!pVCpu->iem.s.cActiveMappings);
15269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15270}
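
/*
 * Illustrative sketch (hypothetical exit handler): emulating "mov cr3, rax" and
 * "mov rbx, cr0" once the exit decoder has identified the registers.  The 3-byte
 * length matches the 0F 22/20 /r encodings without prefixes; real callers take it
 * from the exit information.
 */
#if 0
static VBOXSTRICTRC exampleEmulateMovCrx(PVMCPU pVCpu)
{
    /* mov cr3, rax: CR3 is the destination (iCrReg=3), RAX (0) the source (iGReg). */
    VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg=rax*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* mov rbx, cr0: RBX (3) is the destination (iGReg), CR0 the source (iCrReg=0). */
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 3 /*iGReg=rbx*/, 0 /*iCrReg*/);
}
#endif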
15271
15272
15273/**
15274 * Interface for HM and EM to clear the CR0[TS] bit.
15275 *
15276 * @returns Strict VBox status code.
15277 * @param pVCpu The cross context virtual CPU structure.
15278 * @param cbInstr The instruction length in bytes.
15279 *
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15285
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15288 Assert(!pVCpu->iem.s.cActiveMappings);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15295 *
15296 * @returns Strict VBox status code.
15297 * @param pVCpu The cross context virtual CPU structure.
15298 * @param cbInstr The instruction length in bytes.
15299 * @param uValue The value to load into CR0.
15300 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15301 * memory operand. Otherwise pass NIL_RTGCPTR.
15302 *
15303 * @remarks In ring-0 not all of the state needs to be synced in.
15304 */
15305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15306{
15307 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15308
15309 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15310 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15311 Assert(!pVCpu->iem.s.cActiveMappings);
15312 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15313}
15314
15315
15316/**
15317 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15318 *
15319 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15320 *
15321 * @returns Strict VBox status code.
15322 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15323 * @param cbInstr The instruction length in bytes.
15324 * @remarks In ring-0 not all of the state needs to be synced in.
15325 * @thread EMT(pVCpu)
15326 */
15327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15328{
15329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15330
15331 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15332 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15333 Assert(!pVCpu->iem.s.cActiveMappings);
15334 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15335}
15336
15337
15338/**
15339 * Interface for HM and EM to emulate the WBINVD instruction.
15340 *
15341 * @returns Strict VBox status code.
15342 * @param pVCpu The cross context virtual CPU structure.
15343 * @param cbInstr The instruction length in bytes.
15344 *
15345 * @remarks In ring-0 not all of the state needs to be synced in.
15346 */
15347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15348{
15349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15350
15351 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15352 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15353 Assert(!pVCpu->iem.s.cActiveMappings);
15354 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15355}
15356
15357
15358/**
15359 * Interface for HM and EM to emulate the INVD instruction.
15360 *
15361 * @returns Strict VBox status code.
15362 * @param pVCpu The cross context virtual CPU structure.
15363 * @param cbInstr The instruction length in bytes.
15364 *
15365 * @remarks In ring-0 not all of the state needs to be synced in.
15366 */
15367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15368{
15369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15370
15371 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15372 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15373 Assert(!pVCpu->iem.s.cActiveMappings);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the INVLPG instruction.
15380 *
15381 * @returns Strict VBox status code.
15382 * @retval VINF_PGM_SYNC_CR3
15383 *
15384 * @param pVCpu The cross context virtual CPU structure.
15385 * @param cbInstr The instruction length in bytes.
15386 * @param GCPtrPage The effective address of the page to invalidate.
15387 *
15388 * @remarks In ring-0 not all of the state needs to be synced in.
15389 */
15390VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15391{
15392 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15393
15394 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15395 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15396 Assert(!pVCpu->iem.s.cActiveMappings);
15397 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15398}
15399
15400
15401/**
15402 * Interface for HM and EM to emulate the CPUID instruction.
15403 *
15404 * @returns Strict VBox status code.
15405 *
15406 * @param pVCpu The cross context virtual CPU structure.
15407 * @param cbInstr The instruction length in bytes.
15408 *
15409 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15414 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15415
15416 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15417 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15418 Assert(!pVCpu->iem.s.cActiveMappings);
15419 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15420}
15421
15422
15423/**
15424 * Interface for HM and EM to emulate the RDPMC instruction.
15425 *
15426 * @returns Strict VBox status code.
15427 *
15428 * @param pVCpu The cross context virtual CPU structure.
15429 * @param cbInstr The instruction length in bytes.
15430 *
15431 * @remarks Not all of the state needs to be synced in.
15432 */
15433VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15434{
15435 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15436 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the RDTSC instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15450 *
15451 * @param pVCpu The cross context virtual CPU structure.
15452 * @param cbInstr The instruction length in bytes.
15453 *
15454 * @remarks Not all of the state needs to be synced in.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15459 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15460
15461 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15463 Assert(!pVCpu->iem.s.cActiveMappings);
15464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15465}
15466
15467
15468/**
15469 * Interface for HM and EM to emulate the RDTSCP instruction.
15470 *
15471 * @returns Strict VBox status code.
15472 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15473 *
15474 * @param pVCpu The cross context virtual CPU structure.
15475 * @param cbInstr The instruction length in bytes.
15476 *
15477 * @remarks Not all of the state needs to be synced in. It is recommended
15478 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15479 */
15480VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15481{
15482 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15483 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15484
15485 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15487 Assert(!pVCpu->iem.s.cActiveMappings);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the RDMSR instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15497 *
15498 * @param pVCpu The cross context virtual CPU structure.
15499 * @param cbInstr The instruction length in bytes.
15500 *
15501 * @remarks Not all of the state needs to be synced in. Requires RCX and
15502 * (currently) all MSRs.
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15508
15509 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15510 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15511 Assert(!pVCpu->iem.s.cActiveMappings);
15512 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15513}
15514
15515
15516/**
15517 * Interface for HM and EM to emulate the WRMSR instruction.
15518 *
15519 * @returns Strict VBox status code.
15520 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15521 *
15522 * @param pVCpu The cross context virtual CPU structure.
15523 * @param cbInstr The instruction length in bytes.
15524 *
15525 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15526 * and (currently) all MSRs.
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15529{
15530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15531 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15532 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15533
15534 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15535 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15536 Assert(!pVCpu->iem.s.cActiveMappings);
15537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15538}
15539
15540
15541/**
15542 * Interface for HM and EM to emulate the MONITOR instruction.
15543 *
15544 * @returns Strict VBox status code.
15545 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15546 *
15547 * @param pVCpu The cross context virtual CPU structure.
15548 * @param cbInstr The instruction length in bytes.
15549 *
15550 * @remarks Not all of the state needs to be synced in.
15551 * @remarks ASSUMES the default DS segment and that no segment override
15552 * prefixes are used.
15553 */
15554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15555{
15556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15557 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15558
15559 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15560 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15561 Assert(!pVCpu->iem.s.cActiveMappings);
15562 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15563}
15564
15565
15566/**
15567 * Interface for HM and EM to emulate the MWAIT instruction.
15568 *
15569 * @returns Strict VBox status code.
15570 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15571 *
15572 * @param pVCpu The cross context virtual CPU structure.
15573 * @param cbInstr The instruction length in bytes.
15574 *
15575 * @remarks Not all of the state needs to be synced in.
15576 */
15577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15578{
15579 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15580 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15581
15582 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15584 Assert(!pVCpu->iem.s.cActiveMappings);
15585 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15586}
15587
15588
15589/**
15590 * Interface for HM and EM to emulate the HLT instruction.
15591 *
15592 * @returns Strict VBox status code.
15593 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15594 *
15595 * @param pVCpu The cross context virtual CPU structure.
15596 * @param cbInstr The instruction length in bytes.
15597 *
15598 * @remarks Not all of the state needs to be synced in.
15599 */
15600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15601{
15602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15603
15604 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15606 Assert(!pVCpu->iem.s.cActiveMappings);
15607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15608}
15609
15610
15611/**
15612 * Checks if IEM is in the process of delivering an event (interrupt or
15613 * exception).
15614 *
15615 * @returns true if we're in the process of raising an interrupt or exception,
15616 * false otherwise.
15617 * @param pVCpu The cross context virtual CPU structure.
15618 * @param puVector Where to store the vector associated with the
15619 * currently delivered event, optional.
15620 * @param pfFlags Where to store the event delivery flags (see
15621 * IEM_XCPT_FLAGS_XXX), optional.
15622 * @param puErr Where to store the error code associated with the
15623 * event, optional.
15624 * @param puCr2 Where to store the CR2 associated with the event,
15625 * optional.
15626 * @remarks The caller should check the flags to determine if the error code and
15627 * CR2 are valid for the event.
15628 */
15629VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15630{
15631 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15632 if (fRaisingXcpt)
15633 {
15634 if (puVector)
15635 *puVector = pVCpu->iem.s.uCurXcpt;
15636 if (pfFlags)
15637 *pfFlags = pVCpu->iem.s.fCurXcpt;
15638 if (puErr)
15639 *puErr = pVCpu->iem.s.uCurXcptErr;
15640 if (puCr2)
15641 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15642 }
15643 return fRaisingXcpt;
15644}
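
/*
 * Illustrative sketch (hypothetical caller): querying the event IEM is in the
 * middle of delivering and checking the flags before trusting the error code and
 * CR2 values, as the remark above recommends.  The logging is example only.
 */
#if 0
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("IEM is delivering vector %#x with error code %#x\n", uVector, uErr));
        else
            Log(("IEM is delivering vector %#x (no error code)\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("... CR2=%#RX64\n", uCr2));
    }
}
#endif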
15645
15646#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15647
15648/**
15649 * Interface for HM and EM to emulate the CLGI instruction.
15650 *
15651 * @returns Strict VBox status code.
15652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15653 * @param cbInstr The instruction length in bytes.
15654 * @thread EMT(pVCpu)
15655 */
15656VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15657{
15658 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15659
15660 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15661 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15662 Assert(!pVCpu->iem.s.cActiveMappings);
15663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15664}
15665
15666
15667/**
15668 * Interface for HM and EM to emulate the STGI instruction.
15669 *
15670 * @returns Strict VBox status code.
15671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15672 * @param cbInstr The instruction length in bytes.
15673 * @thread EMT(pVCpu)
15674 */
15675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15676{
15677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15678
15679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15681 Assert(!pVCpu->iem.s.cActiveMappings);
15682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15683}
15684
15685
15686/**
15687 * Interface for HM and EM to emulate the VMLOAD instruction.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param cbInstr The instruction length in bytes.
15692 * @thread EMT(pVCpu)
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15695{
15696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15697
15698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15699 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15700 Assert(!pVCpu->iem.s.cActiveMappings);
15701 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15702}
15703
15704
15705/**
15706 * Interface for HM and EM to emulate the VMSAVE instruction.
15707 *
15708 * @returns Strict VBox status code.
15709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15710 * @param cbInstr The instruction length in bytes.
15711 * @thread EMT(pVCpu)
15712 */
15713VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15714{
15715 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15716
15717 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15719 Assert(!pVCpu->iem.s.cActiveMappings);
15720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15721}
15722
15723
15724/**
15725 * Interface for HM and EM to emulate the INVLPGA instruction.
15726 *
15727 * @returns Strict VBox status code.
15728 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15729 * @param cbInstr The instruction length in bytes.
15730 * @thread EMT(pVCpu)
15731 */
15732VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15733{
15734 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15735
15736 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15737 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15738 Assert(!pVCpu->iem.s.cActiveMappings);
15739 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15740}
15741
15742
15743/**
15744 * Interface for HM and EM to emulate the VMRUN instruction.
15745 *
15746 * @returns Strict VBox status code.
15747 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15748 * @param cbInstr The instruction length in bytes.
15749 * @thread EMT(pVCpu)
15750 */
15751VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15752{
15753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15755
15756 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15758 Assert(!pVCpu->iem.s.cActiveMappings);
15759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15760}
15761
15762
15763/**
15764 * Interface for HM and EM to emulate \#VMEXIT.
15765 *
15766 * @returns Strict VBox status code.
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @param uExitCode The exit code.
15769 * @param uExitInfo1 The exit info. 1 field.
15770 * @param uExitInfo2 The exit info. 2 field.
15771 * @thread EMT(pVCpu)
15772 */
15773VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15774{
15775 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15776 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15777 if (pVCpu->iem.s.cActiveMappings)
15778 iemMemRollback(pVCpu);
15779 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15780}
15781
15782#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15783
15784#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15785
15786/**
15787 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15788 *
15789 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15790 * are performed. Bounds checks are done in strict builds only.
15791 *
15792 * @param pVmcs Pointer to the virtual VMCS.
15793 * @param u64VmcsField The VMCS field.
15794 * @param pu64Dst Where to store the VMCS value.
15795 *
15796 * @remarks May be called with interrupts disabled.
15797 * @todo This should probably be moved to CPUM someday.
15798 */
15799VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15800{
15801 AssertPtr(pVmcs);
15802 AssertPtr(pu64Dst);
15803 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15804}
15805
15806
15807/**
15808 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15809 *
15810 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15811 * are performed. Bounds checks are done in strict builds only.
15812 *
15813 * @param pVmcs Pointer to the virtual VMCS.
15814 * @param u64VmcsField The VMCS field.
15815 * @param u64Val The value to write.
15816 *
15817 * @remarks May be called with interrupts disabled.
15818 * @todo This should probably be moved to CPUM someday.
15819 */
15820VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15821{
15822 AssertPtr(pVmcs);
15823 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15824}
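
/*
 * Illustrative sketch (hypothetical caller): reading and updating a single field
 * of the virtual VMCS with the two helpers above.  VMX_VMCS_GUEST_RIP is used as
 * the example field and the caller is assumed to hold a valid pVmcs; this is a
 * sketch only, not an existing helper.
 */
#if 0
static void exampleAdvanceVirtVmcsRip(PVMXVVMCS pVmcs, uint8_t cbInstr)
{
    uint64_t u64GuestRip;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);
    IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, u64GuestRip + cbInstr);
}
#endif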
15825
15826
15827/**
15828 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15829 *
15830 * @returns Strict VBox status code.
15831 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15832 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15833 * the x2APIC device.
15834 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15835 *
15836 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15837 * @param idMsr The MSR being read or written.
15838 * @param pu64Value Pointer to the value being written or where to store the
15839 * value being read.
15840 * @param fWrite Whether this is an MSR write or read access.
15841 * @thread EMT(pVCpu)
15842 */
15843VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15844{
15845 Assert(pu64Value);
15846
15847 VBOXSTRICTRC rcStrict;
15848 if (fWrite)
15849 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15850 else
15851 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15852 Assert(!pVCpu->iem.s.cActiveMappings);
15853 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15854
15855}
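
/*
 * Illustrative sketch (hypothetical MSR-exit handler): letting IEM try to
 * virtualize an x2APIC MSR read and dispatching on the documented return codes.
 * The fall-back to the regular x2APIC MSR handling is only indicated in comments.
 */
#if 0
static VBOXSTRICTRC exampleVirtualizeX2ApicMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;    /* The read was satisfied from the virtual-APIC page. */
    /* VINF_VMX_INTERCEPT_NOT_ACTIVE: forward to the regular x2APIC MSR handling (not shown);
       a failure status means the caller is expected to raise #GP(0). */
    return rcStrict;
}
#endif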
15856
15857
15858/**
15859 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15860 *
15861 * @returns Strict VBox status code.
15862 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15863 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15864 *
15865 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15866 * @param pExitInfo Pointer to the VM-exit information.
15867 * @param pExitEventInfo Pointer to the VM-exit event information.
15868 * @thread EMT(pVCpu)
15869 */
15870VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15871{
15872 Assert(pExitInfo);
15873 Assert(pExitEventInfo);
15874 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15875 Assert(!pVCpu->iem.s.cActiveMappings);
15876 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15877
15878}
15879
15880
15881/**
15882 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15883 * VM-exit.
15884 *
15885 * @returns Strict VBox status code.
15886 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15887 * @thread EMT(pVCpu)
15888 */
15889VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15890{
15891 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15892 Assert(!pVCpu->iem.s.cActiveMappings);
15893 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15894}
15895
15896
15897/**
15898 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15899 *
15900 * @returns Strict VBox status code.
15901 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15902 * @thread EMT(pVCpu)
15903 */
15904VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15905{
15906 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15907 Assert(!pVCpu->iem.s.cActiveMappings);
15908 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15909}
15910
15911
15912/**
15913 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15914 *
15915 * @returns Strict VBox status code.
15916 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15917 * @param uVector The external interrupt vector (pass 0 if the external
15918 * interrupt is still pending).
15919 * @param fIntPending Whether the external interrupt is pending or
15920 * acknowledged in the interrupt controller.
15921 * @thread EMT(pVCpu)
15922 */
15923VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15924{
15925 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15926 Assert(!pVCpu->iem.s.cActiveMappings);
15927 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15928}
15929
15930
15931/**
15932 * Interface for HM and EM to emulate VM-exit due to exceptions.
15933 *
15934 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15935 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15936 *
15937 * @returns Strict VBox status code.
15938 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15939 * @param pExitInfo Pointer to the VM-exit information.
15940 * @param pExitEventInfo Pointer to the VM-exit event information.
15941 * @thread EMT(pVCpu)
15942 */
15943VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15944{
15945 Assert(pExitInfo);
15946 Assert(pExitEventInfo);
15947 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
15948 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15949 Assert(!pVCpu->iem.s.cActiveMappings);
15950 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15951}
15952
15953
15954/**
15955 * Interface for HM and EM to emulate VM-exit due to NMIs.
15956 *
15957 * @returns Strict VBox status code.
15958 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15959 * @thread EMT(pVCpu)
15960 */
15961VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPU pVCpu)
15962{
15963 VMXVEXITINFO ExitInfo;
15964 RT_ZERO(ExitInfo);
15965 VMXVEXITEVENTINFO ExitEventInfo;
15966 RT_ZERO(ExitEventInfo);
15967 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15968 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15969 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15970
15971 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15972 Assert(!pVCpu->iem.s.cActiveMappings);
15973 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15974}
15975
15976
15977/**
15978 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15979 *
15980 * @returns Strict VBox status code.
15981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15982 * @thread EMT(pVCpu)
15983 */
15984VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPU pVCpu)
15985{
15986 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15987 Assert(!pVCpu->iem.s.cActiveMappings);
15988 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15989}
15990
15991
15992/**
15993 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15994 *
15995 * @returns Strict VBox status code.
15996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15997 * @param uVector The SIPI vector.
15998 * @thread EMT(pVCpu)
15999 */
16000VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
16001{
16002 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
16003 Assert(!pVCpu->iem.s.cActiveMappings);
16004 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16005}
16006
16007
16008/**
16009 * Interface for HM and EM to emulate a VM-exit.
16010 *
16011 * If a specialized version of a VM-exit handler exists, that must be used instead.
16012 *
16013 * @returns Strict VBox status code.
16014 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16015 * @param uExitReason The VM-exit reason.
16016 * @param u64ExitQual The Exit qualification.
16017 * @thread EMT(pVCpu)
16018 */
16019VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
16020{
16021 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
16022 Assert(!pVCpu->iem.s.cActiveMappings);
16023 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16024}
16025
16026
16027/**
16028 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16029 *
16030 * This is meant to be used for those instructions for which VMX provides
16031 * additional decoding information beyond just the instruction length!
16032 *
16033 * @returns Strict VBox status code.
16034 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16035 * @param pExitInfo Pointer to the VM-exit information.
16036 * @thread EMT(pVCpu)
16037 */
16038VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16039{
16040 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
16041 Assert(!pVCpu->iem.s.cActiveMappings);
16042 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16043}
16044
16045
16046/**
16047 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16048 *
16049 * This is meant to be used for those instructions for which VMX provides only
16050 * the instruction length.
16051 *
16052 * @returns Strict VBox status code.
16053 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16054 * @param pExitInfo Pointer to the VM-exit information.
16055 * @param cbInstr The instruction length in bytes.
16056 * @thread EMT(pVCpu)
16057 */
16058VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16059{
16060 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16061 Assert(!pVCpu->iem.s.cActiveMappings);
16062 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16063}
16064
16065
16066/**
16067 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16068 *
16069 * @returns Strict VBox status code.
16070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16071 * @param pExitInfo Pointer to the VM-exit information.
16072 * @param pExitEventInfo Pointer to the VM-exit event information.
16073 * @thread EMT(pVCpu)
16074 */
16075VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16076{
16077 Assert(pExitInfo);
16078 Assert(pExitEventInfo);
16079 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16080 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16081 Assert(!pVCpu->iem.s.cActiveMappings);
16082 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16083}
16084
16085
16086/**
16087 * Interface for HM and EM to emulate the VMREAD instruction.
16088 *
16089 * @returns Strict VBox status code.
16090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16091 * @param pExitInfo Pointer to the VM-exit information.
16092 * @thread EMT(pVCpu)
16093 */
16094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16095{
16096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16097 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16098 Assert(pExitInfo);
16099
16100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16101
16102 VBOXSTRICTRC rcStrict;
16103 uint8_t const cbInstr = pExitInfo->cbInstr;
16104 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16105 uint64_t const u64FieldEnc = fIs64BitMode
16106 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16107 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16108 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16109 {
16110 if (fIs64BitMode)
16111 {
16112 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16113 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16114 }
16115 else
16116 {
16117 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16118 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16119 }
16120 }
16121 else
16122 {
16123 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16124 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16125 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16126 }
16127 Assert(!pVCpu->iem.s.cActiveMappings);
16128 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16129}


/**
 * Interface for HM and EM to emulate the VMWRITE instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint64_t u64Val;
    uint8_t  iEffSeg;
    if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
    {
        u64Val  = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
        iEffSeg = UINT8_MAX;
    }
    else
    {
        u64Val  = pExitInfo->GCPtrEffAddr;
        iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
    }
    uint8_t const  cbInstr     = pExitInfo->cbInstr;
    uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                               ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
                               : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMPTRLD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMPTRST instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMCLEAR instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uInstrId    The instruction ID (VMXINSTRID_VMLAUNCH or
 *                      VMXINSTRID_VMRESUME).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVVPID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
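
/*
 * Illustrative sketch: how a caller (e.g. HM) might package the decoded
 * INVVPID operands into the exit information consumed above.  Only the
 * fields IEMExecDecodedInvvpid actually reads are filled in; the concrete
 * values (instruction length, segment, GPR index) are made-up placeholders,
 * and a real caller derives everything from the VMCS exit qualification and
 * instruction-information fields and may need to set more than shown.
 */
#if 0 /* illustrative sketch, excluded from the build */
static VBOXSTRICTRC SketchCallDecodedInvvpid(PVMCPU pVCpu, RTGCPTR GCPtrDesc)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason               = VMX_EXIT_INVVPID;
    ExitInfo.cbInstr               = 4;               /* placeholder instruction length */
    ExitInfo.GCPtrEffAddr          = GCPtrDesc;       /* guest-linear address of the INVVPID descriptor */
    ExitInfo.InstrInfo.Inv.iSegReg = X86_SREG_DS;     /* segment used for the memory operand */
    ExitInfo.InstrInfo.Inv.iReg2   = X86_GREG_xAX;    /* GPR holding the invalidation type */
    return IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
}
#endif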


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
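
/*
 * Illustrative sketch: the handler above splits the faulting guest-physical
 * address into the page-aligned base (used to validate and, if necessary,
 * deregister the APIC-access page) and the offset within the page (handed to
 * iemVmxVirtApicAccessMem).  A tiny self-contained illustration of that
 * masking, assuming the 4K page size used here; the Sketch* names are
 * illustrative only.
 */
#if 0 /* illustrative sketch, excluded from the build */
# include <stdint.h>

# define SKETCH_PAGE_OFFSET_MASK UINT64_C(0xfff)   /* 4K pages, mirrors PAGE_OFFSET_MASK */

static void SketchSplitFaultAddress(uint64_t GCPhysFault, uint64_t *pGCPhysPage, uint16_t *poffAccess)
{
    *pGCPhysPage = GCPhysFault & ~SKETCH_PAGE_OFFSET_MASK;            /* e.g. 0xfee00000 for the default APIC base */
    *poffAccess  = (uint16_t)(GCPhysFault & SKETCH_PAGE_OFFSET_MASK); /* e.g. 0x080 for a TPR access */
}
#endif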

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
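
/*
 * Illustrative sketch: for two informational statuses inside the EM
 * scheduling range, the merge above keeps the numerically smaller one (by EM
 * convention the stricter scheduling request), while plain success on either
 * side simply yields the other status.  A self-contained illustration of
 * that rule using plain integers instead of VBOXSTRICTRC:
 */
#if 0 /* illustrative sketch, excluded from the build */
static int SketchMergeEmStatus(int rcStrict, int rcStrictCommit, int iEmFirst, int iEmLast)
{
    if (rcStrict == 0)          /* stand-in for VINF_SUCCESS */
        return rcStrictCommit;
    if (rcStrictCommit == 0)
        return rcStrict;
    if (   rcStrict       >= iEmFirst && rcStrict       <= iEmLast
        && rcStrictCommit >= iEmFirst && rcStrictCommit <= iEmLast)
        return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit; /* stricter scheduling request wins */
    return -1;                  /* anything else would be handed to the slow/fatal merge path */
}
#endif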


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
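
/*
 * Illustrative sketch: each pending bounce buffer is replayed above as up to
 * two physical writes -- cbFirst bytes at GCPhysFirst followed by cbSecond
 * bytes at GCPhysSecond -- which is how a guest write straddling a page
 * boundary was originally split.  A minimal, self-contained illustration of
 * that split; the names and the 4K page size are assumptions for
 * illustration only.
 */
#if 0 /* illustrative sketch, excluded from the build */
# include <stdint.h>

# define SKETCH_PAGE_SIZE 0x1000u

static void SketchSplitBufferedWrite(uint64_t GCPhysDst, uint16_t cbTotal,
                                     uint64_t *pGCPhysFirst, uint16_t *pcbFirst,
                                     uint64_t *pGCPhysSecond, uint16_t *pcbSecond)
{
    uint32_t const offPage = (uint32_t)(GCPhysDst & (SKETCH_PAGE_SIZE - 1));
    uint32_t const cbLeft  = SKETCH_PAGE_SIZE - offPage;      /* room left in the first page */
    *pGCPhysFirst  = GCPhysDst;
    *pcbFirst      = (uint16_t)(cbTotal <= cbLeft ? cbTotal : cbLeft);
    *pGCPhysSecond = (GCPhysDst & ~(uint64_t)(SKETCH_PAGE_SIZE - 1)) + SKETCH_PAGE_SIZE;
    *pcbSecond     = (uint16_t)(cbTotal - *pcbFirst);         /* 0 when the write fits in one page */
    /* The data itself would then go out as pbBuf[0..cbFirst) followed by
       pbBuf[cbFirst..cbTotal), matching the two PGMPhysWrite calls above. */
}
#endif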

#endif /* IN_RING3 */
