VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 77896

Last change on this file: r77896, checked in by vboxsync, 6 years ago

VMM/IEM: Fix CPL checks for INT1 (ICEBP) generated #DBs. Fix v8086 mode IOPL checks for INT1 (ICEBP) and INTO.

1/* $Id: IEMAll.cpp 77896 2019-03-27 04:51:37Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
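/* Illustrative only (not part of the original comment): the statements below
 * sketch how the levels above are typically exercised with the standard VBox
 * logging macros from VBox/log.h. The variable names are made up for the
 * example; only the macro names and the double-parenthesis convention are real.
 *
 *     Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));             - level 1: exceptions
 *     LogFlow(("IEMExecOne: cs:rip=%04x:%RGv\n", uCs, GCPtrRip));  - flow: enter/exit info
 *     Log4(("decode - %04x:%RGv %s\n", uCs, GCPtrRip, pszMnem));   - level 4: mnemonics w/ EIP
 */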
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
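/*
 * Illustrative sketch only, kept out of the build with #if 0: the general
 * shape of a decoder function declared with FNIEMOP_DEF. The name
 * iemOp_example is hypothetical and not one of the real decoders.
 */
#if 0
FNIEMOP_DEF(iemOp_example)
{
    /* A real decoder fetches any remaining opcode bytes here and either runs
       an IEM_MC block or defers to a C implementation; this stub just succeeds. */
    RT_NOREF(pVCpu);
    return VINF_SUCCESS;
}
#endif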
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
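/*
 * Usage sketch (hypothetical helper, disabled): the macro supplies the
 * 'default:' label itself, so it simply replaces the default case of a
 * switch that already covers every legal value.
 */
#if 0
IEM_STATIC uint8_t iemExampleOpSizeToBytes(IEMMODE enmOpSize)
{
    switch (enmOpSize)
    {
        case IEMMODE_16BIT: return 2;
        case IEMMODE_32BIT: return 4;
        case IEMMODE_64BIT: return 8;
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(UINT8_MAX);
    }
}
#endif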
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
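/*
 * Usage sketch (hypothetical decoder path, disabled): bailing out of a
 * decoder with the not-implemented status while logging what was missing.
 */
#if 0
FNIEMOP_DEF(iemOp_example_unimplemented)
{
    RT_NOREF(pVCpu);
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: prefix combination not handled\n"));
}
#endif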
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
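/*
 * Dispatch sketch (illustrative; bOpcode and bRm are assumed locals, and
 * iemOpCommonExample is a hypothetical callee): the shape in which these
 * call macros are used from the opcode tables and decoder helpers.
 *
 *     return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 *     return FNIEMOP_CALL_1(iemOpCommonExample, bRm);
 */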
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
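/*
 * Sketch of what the define selects between (hypothetical helper, disabled;
 * not the real memory access code): a direct unaligned load on x86/AMD64
 * hosts versus a byte-wise copy on strict-alignment hosts.
 */
#if 0
DECLINLINE(uint32_t) iemExampleFetchU32(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pb;      /* x86/AMD64 tolerate unaligned loads. */
# else
    uint32_t u32;
    memcpy(&u32, pb, sizeof(u32));     /* safe on strict-alignment hosts */
    return u32;
# endif
}
#endif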
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles an SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
742
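/*
 * Lookup sketch (illustrative): the group-1 opcodes (0x80..0x83) pick their
 * worker table via the ModR/M reg field, e.g. assuming a local bRm:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */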
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmi(PVMCPU pVCpu);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu);
991IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
993IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
994IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
995IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
996#endif
997
998#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
999IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
1000IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1001#endif
1002
1003
1004/**
1005 * Sets the pass up status.
1006 *
1007 * @returns VINF_SUCCESS.
1008 * @param pVCpu The cross context virtual CPU structure of the
1009 * calling thread.
1010 * @param rcPassUp The pass up status. Must be informational.
1011 * VINF_SUCCESS is not allowed.
1012 */
1013IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1014{
1015 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1016
1017 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1018 if (rcOldPassUp == VINF_SUCCESS)
1019 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1020 /* If both are EM scheduling codes, use EM priority rules. */
1021 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1022 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1023 {
1024 if (rcPassUp < rcOldPassUp)
1025 {
1026 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1028 }
1029 else
1030 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1031 }
1032 /* Override EM scheduling with specific status code. */
1033 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1034 {
1035 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1036 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1037 }
1038 /* Don't override specific status code, first come first served. */
1039 else
1040 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1041 return VINF_SUCCESS;
1042}
1043
1044
1045/**
1046 * Calculates the CPU mode.
1047 *
1048 * This is mainly for updating IEMCPU::enmCpuMode.
1049 *
1050 * @returns CPU mode.
1051 * @param pVCpu The cross context virtual CPU structure of the
1052 * calling thread.
1053 */
1054DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1055{
1056 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1057 return IEMMODE_64BIT;
1058 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1059 return IEMMODE_32BIT;
1060 return IEMMODE_16BIT;
1061}
1062
1063
1064/**
1065 * Initializes the execution state.
1066 *
1067 * @param pVCpu The cross context virtual CPU structure of the
1068 * calling thread.
1069 * @param fBypassHandlers Whether to bypass access handlers.
1070 *
1071 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1072 * side-effects in strict builds.
1073 */
1074DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1075{
1076 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1077 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1078
1079#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1088#endif
1089
1090#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1091 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1092#endif
1093 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1094 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1095#ifdef VBOX_STRICT
1096 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1097 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1098 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1099 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1100 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1101 pVCpu->iem.s.uRexReg = 127;
1102 pVCpu->iem.s.uRexB = 127;
1103 pVCpu->iem.s.offModRm = 127;
1104 pVCpu->iem.s.uRexIndex = 127;
1105 pVCpu->iem.s.iEffSeg = 127;
1106 pVCpu->iem.s.idxPrefix = 127;
1107 pVCpu->iem.s.uVex3rdReg = 127;
1108 pVCpu->iem.s.uVexLength = 127;
1109 pVCpu->iem.s.fEvexStuff = 127;
1110 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1111# ifdef IEM_WITH_CODE_TLB
1112 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1113 pVCpu->iem.s.pbInstrBuf = NULL;
1114 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1115 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1116 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1117 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1118# else
1119 pVCpu->iem.s.offOpcode = 127;
1120 pVCpu->iem.s.cbOpcode = 127;
1121# endif
1122#endif
1123
1124 pVCpu->iem.s.cActiveMappings = 0;
1125 pVCpu->iem.s.iNextMapping = 0;
1126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1127 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1129 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1130 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1131 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1132 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1133 if (!pVCpu->iem.s.fInPatchCode)
1134 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1135#endif
1136}
1137
1138#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1139/**
1140 * Performs a minimal reinitialization of the execution state.
1141 *
1142 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1143 * 'world-switch' type operations on the CPU. Currently only nested
1144 * hardware-virtualization uses it.
1145 *
1146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1147 */
1148IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1149{
1150 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1151 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1152
1153 pVCpu->iem.s.uCpl = uCpl;
1154 pVCpu->iem.s.enmCpuMode = enmMode;
1155 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1156 pVCpu->iem.s.enmEffAddrMode = enmMode;
1157 if (enmMode != IEMMODE_64BIT)
1158 {
1159 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1160 pVCpu->iem.s.enmEffOpSize = enmMode;
1161 }
1162 else
1163 {
1164 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1165 pVCpu->iem.s.enmEffOpSize = enmMode;
1166 }
1167 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1168#ifndef IEM_WITH_CODE_TLB
1169 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1170 pVCpu->iem.s.offOpcode = 0;
1171 pVCpu->iem.s.cbOpcode = 0;
1172#endif
1173 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1174}
1175#endif
1176
1177/**
1178 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1179 *
1180 * @param pVCpu The cross context virtual CPU structure of the
1181 * calling thread.
1182 */
1183DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1184{
1185 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1186#ifdef VBOX_STRICT
1187# ifdef IEM_WITH_CODE_TLB
1188 NOREF(pVCpu);
1189# else
1190 pVCpu->iem.s.cbOpcode = 0;
1191# endif
1192#else
1193 NOREF(pVCpu);
1194#endif
1195}
1196
1197
1198/**
1199 * Initializes the decoder state.
1200 *
1201 * iemReInitDecoder is mostly a copy of this function.
1202 *
1203 * @param pVCpu The cross context virtual CPU structure of the
1204 * calling thread.
1205 * @param fBypassHandlers Whether to bypass access handlers.
1206 */
1207DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1208{
1209 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1210 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1211
1212#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1221#endif
1222
1223#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1224 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1225#endif
1226 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1227 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1228 pVCpu->iem.s.enmCpuMode = enmMode;
1229 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1230 pVCpu->iem.s.enmEffAddrMode = enmMode;
1231 if (enmMode != IEMMODE_64BIT)
1232 {
1233 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1234 pVCpu->iem.s.enmEffOpSize = enmMode;
1235 }
1236 else
1237 {
1238 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1239 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1240 }
1241 pVCpu->iem.s.fPrefixes = 0;
1242 pVCpu->iem.s.uRexReg = 0;
1243 pVCpu->iem.s.uRexB = 0;
1244 pVCpu->iem.s.uRexIndex = 0;
1245 pVCpu->iem.s.idxPrefix = 0;
1246 pVCpu->iem.s.uVex3rdReg = 0;
1247 pVCpu->iem.s.uVexLength = 0;
1248 pVCpu->iem.s.fEvexStuff = 0;
1249 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1250#ifdef IEM_WITH_CODE_TLB
1251 pVCpu->iem.s.pbInstrBuf = NULL;
1252 pVCpu->iem.s.offInstrNextByte = 0;
1253 pVCpu->iem.s.offCurInstrStart = 0;
1254# ifdef VBOX_STRICT
1255 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1256 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1257 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1258# endif
1259#else
1260 pVCpu->iem.s.offOpcode = 0;
1261 pVCpu->iem.s.cbOpcode = 0;
1262#endif
1263 pVCpu->iem.s.offModRm = 0;
1264 pVCpu->iem.s.cActiveMappings = 0;
1265 pVCpu->iem.s.iNextMapping = 0;
1266 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1267 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1268#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1269 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1270 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1271 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1272 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1273 if (!pVCpu->iem.s.fInPatchCode)
1274 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1275#endif
1276
1277#ifdef DBGFTRACE_ENABLED
1278 switch (enmMode)
1279 {
1280 case IEMMODE_64BIT:
1281 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1282 break;
1283 case IEMMODE_32BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1285 break;
1286 case IEMMODE_16BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1288 break;
1289 }
1290#endif
1291}
1292
1293
1294/**
1295 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1296 *
1297 * This is mostly a copy of iemInitDecoder.
1298 *
1299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1300 */
1301DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1302{
1303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1304
1305#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1314#endif
1315
1316 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1317 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1318 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1319 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1320 pVCpu->iem.s.enmEffAddrMode = enmMode;
1321 if (enmMode != IEMMODE_64BIT)
1322 {
1323 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1324 pVCpu->iem.s.enmEffOpSize = enmMode;
1325 }
1326 else
1327 {
1328 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1329 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1330 }
1331 pVCpu->iem.s.fPrefixes = 0;
1332 pVCpu->iem.s.uRexReg = 0;
1333 pVCpu->iem.s.uRexB = 0;
1334 pVCpu->iem.s.uRexIndex = 0;
1335 pVCpu->iem.s.idxPrefix = 0;
1336 pVCpu->iem.s.uVex3rdReg = 0;
1337 pVCpu->iem.s.uVexLength = 0;
1338 pVCpu->iem.s.fEvexStuff = 0;
1339 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1340#ifdef IEM_WITH_CODE_TLB
1341 if (pVCpu->iem.s.pbInstrBuf)
1342 {
1343 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1344 - pVCpu->iem.s.uInstrBufPc;
1345 if (off < pVCpu->iem.s.cbInstrBufTotal)
1346 {
1347 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1348 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1349 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1350 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1351 else
1352 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1353 }
1354 else
1355 {
1356 pVCpu->iem.s.pbInstrBuf = NULL;
1357 pVCpu->iem.s.offInstrNextByte = 0;
1358 pVCpu->iem.s.offCurInstrStart = 0;
1359 pVCpu->iem.s.cbInstrBuf = 0;
1360 pVCpu->iem.s.cbInstrBufTotal = 0;
1361 }
1362 }
1363 else
1364 {
1365 pVCpu->iem.s.offInstrNextByte = 0;
1366 pVCpu->iem.s.offCurInstrStart = 0;
1367 pVCpu->iem.s.cbInstrBuf = 0;
1368 pVCpu->iem.s.cbInstrBufTotal = 0;
1369 }
1370#else
1371 pVCpu->iem.s.cbOpcode = 0;
1372 pVCpu->iem.s.offOpcode = 0;
1373#endif
1374 pVCpu->iem.s.offModRm = 0;
1375 Assert(pVCpu->iem.s.cActiveMappings == 0);
1376 pVCpu->iem.s.iNextMapping = 0;
1377 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1378 Assert(pVCpu->iem.s.fBypassHandlers == false);
1379#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1380 if (!pVCpu->iem.s.fInPatchCode)
1381 { /* likely */ }
1382 else
1383 {
1384 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1385 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1386 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1387 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1388 if (!pVCpu->iem.s.fInPatchCode)
1389 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1390 }
1391#endif
1392
1393#ifdef DBGFTRACE_ENABLED
1394 switch (enmMode)
1395 {
1396 case IEMMODE_64BIT:
1397 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1398 break;
1399 case IEMMODE_32BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1401 break;
1402 case IEMMODE_16BIT:
1403 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1404 break;
1405 }
1406#endif
1407}
1408
1409
1410
1411/**
1412 * Prefetches opcodes the first time, when starting execution.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pVCpu The cross context virtual CPU structure of the
1416 * calling thread.
1417 * @param fBypassHandlers Whether to bypass access handlers.
1418 */
1419IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1420{
1421 iemInitDecoder(pVCpu, fBypassHandlers);
1422
1423#ifdef IEM_WITH_CODE_TLB
1424 /** @todo Do ITLB lookup here. */
1425
1426#else /* !IEM_WITH_CODE_TLB */
1427
1428 /*
1429 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1430 *
1431 * First translate CS:rIP to a physical address.
1432 */
1433 uint32_t cbToTryRead;
1434 RTGCPTR GCPtrPC;
1435 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1436 {
1437 cbToTryRead = PAGE_SIZE;
1438 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1439 if (IEM_IS_CANONICAL(GCPtrPC))
1440 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1441 else
1442 return iemRaiseGeneralProtectionFault0(pVCpu);
1443 }
1444 else
1445 {
1446 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1447 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1448 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1449 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1450 else
1451 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1452 if (cbToTryRead) { /* likely */ }
1453 else /* overflowed */
1454 {
1455 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1456 cbToTryRead = UINT32_MAX;
1457 }
1458 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1459 Assert(GCPtrPC <= UINT32_MAX);
1460 }
1461
1462# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1463 /* Allow interpretation of patch manager code blocks since they can for
1464 instance throw #PFs for perfectly good reasons. */
1465 if (pVCpu->iem.s.fInPatchCode)
1466 {
1467 size_t cbRead = 0;
1468 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1469 AssertRCReturn(rc, rc);
1470 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1471 return VINF_SUCCESS;
1472 }
1473# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1474
1475 RTGCPHYS GCPhys;
1476 uint64_t fFlags;
1477 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1478 if (RT_SUCCESS(rc)) { /* probable */ }
1479 else
1480 {
1481 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1482 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1483 }
1484 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1485 else
1486 {
1487 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1488 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1489 }
1490 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1491 else
1492 {
1493 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1494 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1495 }
1496 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1497 /** @todo Check reserved bits and such stuff. PGM is better at doing
1498 * that, so do it when implementing the guest virtual address
1499 * TLB... */
1500
1501 /*
1502 * Read the bytes at this address.
1503 */
1504 PVM pVM = pVCpu->CTX_SUFF(pVM);
1505# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1506 size_t cbActual;
1507 if ( PATMIsEnabled(pVM)
1508 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1509 {
1510 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1511 Assert(cbActual > 0);
1512 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1513 }
1514 else
1515# endif
1516 {
1517 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1518 if (cbToTryRead > cbLeftOnPage)
1519 cbToTryRead = cbLeftOnPage;
1520 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1521 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1522
1523 if (!pVCpu->iem.s.fBypassHandlers)
1524 {
1525 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1526 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1527 { /* likely */ }
1528 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1529 {
1530 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1531 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1532 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1533 }
1534 else
1535 {
1536 Log((RT_SUCCESS(rcStrict)
1537 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1538 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1539 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1540 return rcStrict;
1541 }
1542 }
1543 else
1544 {
1545 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1546 if (RT_SUCCESS(rc))
1547 { /* likely */ }
1548 else
1549 {
1550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1551 GCPtrPC, GCPhys, cbToTryRead, rc));
1552 return rc;
1553 }
1554 }
1555 pVCpu->iem.s.cbOpcode = cbToTryRead;
1556 }
1557#endif /* !IEM_WITH_CODE_TLB */
1558 return VINF_SUCCESS;
1559}
1560
1561
1562/**
1563 * Invalidates the IEM TLBs.
1564 *
1565 * This is called internally as well as by PGM when moving GC mappings.
1566 *
1568 * @param pVCpu The cross context virtual CPU structure of the calling
1569 * thread.
1570 * @param fVmm Set when PGM calls us with a remapping.
1571 */
1572VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1573{
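/* Invalidation is done lazily: each TLB keeps a revision counter that is OR'ed
   into every entry tag, so bumping the revision retires all existing entries in
   one go.  Only on the (rare) wrap-around to zero are the tags scrubbed below. */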
1574#ifdef IEM_WITH_CODE_TLB
1575 pVCpu->iem.s.cbInstrBufTotal = 0;
1576 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1577 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1578 { /* very likely */ }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1582 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1583 while (i-- > 0)
1584 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1585 }
1586#endif
1587
1588#ifdef IEM_WITH_DATA_TLB
1589 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1590 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1591 { /* very likely */ }
1592 else
1593 {
1594 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1595 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1596 while (i-- > 0)
1597 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1598 }
1599#endif
1600 NOREF(pVCpu); NOREF(fVmm);
1601}
1602
1603
1604/**
1605 * Invalidates a page in the TLBs.
1606 *
1607 * @param pVCpu The cross context virtual CPU structure of the calling
1608 * thread.
1609 * @param GCPtr The address of the page to invalidate.
1610 */
1611VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1612{
1613#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1614 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1615 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1616 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1617 uintptr_t idx = (uint8_t)GCPtr;
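/* The TLBs are direct-mapped with 256 entries, indexed by the low 8 bits of the
   page number; a hit additionally requires the stored tag to carry the current
   revision (see the comparisons below). */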
1618
1619# ifdef IEM_WITH_CODE_TLB
1620 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1621 {
1622 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1623 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1624 pVCpu->iem.s.cbInstrBufTotal = 0;
1625 }
1626# endif
1627
1628# ifdef IEM_WITH_DATA_TLB
1629 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1630 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1631# endif
1632#else
1633 NOREF(pVCpu); NOREF(GCPtr);
1634#endif
1635}
1636
1637
1638/**
1639 * Invalidates the host physical aspects of the IEM TLBs.
1640 *
1641 * This is called internally as well as by PGM when moving GC mappings.
1642 *
1643 * @param pVCpu The cross context virtual CPU structure of the calling
1644 * thread.
1645 */
1646VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1647{
1648#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1649 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1650
1651# ifdef IEM_WITH_CODE_TLB
1652 pVCpu->iem.s.cbInstrBufTotal = 0;
1653# endif
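/* The physical side of an entry (GCPhys, ring-3 mapping, access flags) is
   versioned separately via uTlbPhysRev: entries whose IEMTLBE_F_PHYS_REV bits
   no longer match the current revision must refresh that info before use.
   Only when the revision would wrap to zero are all entries scrubbed here. */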
1654 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1655 if (uTlbPhysRev != 0)
1656 {
1657 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1658 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1659 }
1660 else
1661 {
1662 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1663 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1664
1665 unsigned i;
1666# ifdef IEM_WITH_CODE_TLB
1667 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1668 while (i-- > 0)
1669 {
1670 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1671 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1672 }
1673# endif
1674# ifdef IEM_WITH_DATA_TLB
1675 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1676 while (i-- > 0)
1677 {
1678 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1679 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1680 }
1681# endif
1682 }
1683#else
1684 NOREF(pVCpu);
1685#endif
1686}
1687
1688
1689/**
1690 * Invalidates the host physical aspects of the IEM TLBs.
1691 *
1692 * This is called internally as well as by PGM when moving GC mappings.
1693 *
1694 * @param pVM The cross context VM structure.
1695 *
1696 * @remarks Caller holds the PGM lock.
1697 */
1698VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1699{
1700 RT_NOREF_PV(pVM);
1701}
1702
1703#ifdef IEM_WITH_CODE_TLB
1704
1705/**
1706 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1707 * longjmping on failure.
1708 *
1709 * We end up here for a number of reasons:
1710 * - pbInstrBuf isn't yet initialized.
1711 * - Advancing beyond the buffer boundary (e.g. cross page).
1712 * - Advancing beyond the CS segment limit.
1713 * - Fetching from non-mappable page (e.g. MMIO).
1714 *
1715 * @param pVCpu The cross context virtual CPU structure of the
1716 * calling thread.
1717 * @param pvDst Where to return the bytes.
1718 * @param cbDst Number of bytes to read.
1719 *
1720 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1721 */
1722IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1723{
1724#ifdef IN_RING3
1725 for (;;)
1726 {
1727 Assert(cbDst <= 8);
1728 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1729
1730 /*
1731 * We might have a partial buffer match; deal with that first to make the
1732 * rest simpler. This is the first part of the cross page/buffer case.
1733 */
1734 if (pVCpu->iem.s.pbInstrBuf != NULL)
1735 {
1736 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1737 {
1738 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1739 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1740 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1741
1742 cbDst -= cbCopy;
1743 pvDst = (uint8_t *)pvDst + cbCopy;
1744 offBuf += cbCopy;
1745 pVCpu->iem.s.offInstrNextByte += offBuf;
1746 }
1747 }
1748
1749 /*
1750 * Check segment limit, figuring how much we're allowed to access at this point.
1751 *
1752 * We will fault immediately if RIP is past the segment limit / in non-canonical
1753 * territory. If we do continue, there are one or more bytes to read before we
1754 * end up in trouble and we need to do that first before faulting.
1755 */
1756 RTGCPTR GCPtrFirst;
1757 uint32_t cbMaxRead;
1758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1759 {
1760 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1761 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1762 { /* likely */ }
1763 else
1764 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1765 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1766 }
1767 else
1768 {
1769 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1770 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1771 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1772 { /* likely */ }
1773 else
1774 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1775 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1776 if (cbMaxRead != 0)
1777 { /* likely */ }
1778 else
1779 {
1780 /* Overflowed because address is 0 and limit is max. */
1781 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1782 cbMaxRead = X86_PAGE_SIZE;
1783 }
1784 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1785 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1786 if (cbMaxRead2 < cbMaxRead)
1787 cbMaxRead = cbMaxRead2;
1788 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1789 }
1790
1791 /*
1792 * Get the TLB entry for this piece of code.
1793 */
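/* (The tag embeds the current code TLB revision, which is how IEMTlbInvalidateAll
    retires stale entries without touching them.) */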
1794 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1795 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1796 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1797 if (pTlbe->uTag == uTag)
1798 {
1799 /* likely when executing lots of code, otherwise unlikely */
1800# ifdef VBOX_WITH_STATISTICS
1801 pVCpu->iem.s.CodeTlb.cTlbHits++;
1802# endif
1803 }
1804 else
1805 {
1806 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1807# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1808 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1809 {
1810 pTlbe->uTag = uTag;
1811 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1812 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1813 pTlbe->GCPhys = NIL_RTGCPHYS;
1814 pTlbe->pbMappingR3 = NULL;
1815 }
1816 else
1817# endif
1818 {
1819 RTGCPHYS GCPhys;
1820 uint64_t fFlags;
1821 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1822 if (RT_FAILURE(rc))
1823 {
1824 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1825 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1826 }
1827
1828 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1829 pTlbe->uTag = uTag;
1830 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1831 pTlbe->GCPhys = GCPhys;
1832 pTlbe->pbMappingR3 = NULL;
1833 }
1834 }
1835
1836 /*
1837 * Check TLB page table level access flags.
1838 */
1839 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1840 {
1841 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1842 {
1843 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1844 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1845 }
1846 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1847 {
1848 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1849 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1850 }
1851 }
1852
1853# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1854 /*
1855 * Allow interpretation of patch manager code blocks since they can for
1856 * instance throw #PFs for perfectly good reasons.
1857 */
1858 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1859 { /* no unlikely */ }
1860 else
1861 {
1862 /** @todo This could be optimized a little in ring-3 if we liked. */
1863 size_t cbRead = 0;
1864 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1865 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1866 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1867 return;
1868 }
1869# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1870
1871 /*
1872 * Look up the physical page info if necessary.
1873 */
1874 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 { /* not necessary */ }
1876 else
1877 {
1878 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1879 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1880 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1881 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1882 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1883 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1884 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1885 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1886 }
1887
1888# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1889 /*
1890 * Try to do a direct read using the pbMappingR3 pointer.
1891 */
1892 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1893 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1894 {
1895 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1896 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1897 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1898 {
1899 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1900 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1901 }
1902 else
1903 {
1904 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1905 Assert(cbInstr < cbMaxRead);
1906 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1907 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1908 }
1909 if (cbDst <= cbMaxRead)
1910 {
1911 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1912 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1913 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1914 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1915 return;
1916 }
1917 pVCpu->iem.s.pbInstrBuf = NULL;
1918
1919 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1920 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1921 }
1922 else
1923# endif
1924#if 0
1925 /*
1926 * If there is no special read handling, we can read a bit more and
1927 * put it in the prefetch buffer.
1928 */
1929 if ( cbDst < cbMaxRead
1930 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1931 {
1932 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1933 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1935 { /* likely */ }
1936 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1937 {
1938 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1939 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1940 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1941 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1942 }
1943 else
1944 {
1945 Log((RT_SUCCESS(rcStrict)
1946 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1947 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1948 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1949 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1950 }
1951 }
1952 /*
1953 * Special read handling, so only read exactly what's needed.
1954 * This is a highly unlikely scenario.
1955 */
1956 else
1957#endif
1958 {
1959 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1960 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1961 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1962 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1964 { /* likely */ }
1965 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1966 {
1967 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1968 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1969 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1970 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1971 }
1972 else
1973 {
1974 Log((RT_SUCCESS(rcStrict)
1975 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1976 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1977 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1978 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1979 }
1980 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1981 if (cbToRead == cbDst)
1982 return;
1983 }
1984
1985 /*
1986 * More to read, loop.
1987 */
1988 cbDst -= cbMaxRead;
1989 pvDst = (uint8_t *)pvDst + cbMaxRead;
1990 }
1991#else
1992 RT_NOREF(pvDst, cbDst);
1993 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1994#endif
1995}
1996
1997#else
1998
1999/**
2000 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2001 * exception if it fails.
2002 *
2003 * @returns Strict VBox status code.
2004 * @param pVCpu The cross context virtual CPU structure of the
2005 * calling thread.
2006 * @param cbMin The minimum number of bytes relative to offOpcode
2007 * that must be read.
2008 */
2009IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2010{
2011 /*
2012 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2013 *
2014 * First translate CS:rIP to a physical address.
2015 */
2016 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2017 uint32_t cbToTryRead;
2018 RTGCPTR GCPtrNext;
2019 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2020 {
2021 cbToTryRead = PAGE_SIZE;
2022 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2023 if (!IEM_IS_CANONICAL(GCPtrNext))
2024 return iemRaiseGeneralProtectionFault0(pVCpu);
2025 }
2026 else
2027 {
2028 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2029 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2030 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2031 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2032 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2033 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2034 if (!cbToTryRead) /* overflowed */
2035 {
2036 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2037 cbToTryRead = UINT32_MAX;
2038 /** @todo check out wrapping around the code segment. */
2039 }
2040 if (cbToTryRead < cbMin - cbLeft)
2041 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2042 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2043 }
2044
2045 /* Only read up to the end of the page, and make sure we don't read more
2046 than the opcode buffer can hold. */
2047 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2048 if (cbToTryRead > cbLeftOnPage)
2049 cbToTryRead = cbLeftOnPage;
2050 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2051 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2052/** @todo r=bird: Convert assertion into undefined opcode exception? */
2053 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2054
2055# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2056 /* Allow interpretation of patch manager code blocks since they can for
2057 instance throw #PFs for perfectly good reasons. */
2058 if (pVCpu->iem.s.fInPatchCode)
2059 {
2060 size_t cbRead = 0;
2061 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2062 AssertRCReturn(rc, rc);
2063 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2064 return VINF_SUCCESS;
2065 }
2066# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2067
2068 RTGCPHYS GCPhys;
2069 uint64_t fFlags;
2070 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2071 if (RT_FAILURE(rc))
2072 {
2073 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2074 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2075 }
2076 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2077 {
2078 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2079 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2080 }
2081 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2082 {
2083 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2084 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2085 }
2086 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2087 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2088 /** @todo Check reserved bits and such stuff. PGM is better at doing
2089 * that, so do it when implementing the guest virtual address
2090 * TLB... */
2091
2092 /*
2093 * Read the bytes at this address.
2094 *
2095 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2096 * and since PATM should only patch the start of an instruction there
2097 * should be no need to check again here.
2098 */
2099 if (!pVCpu->iem.s.fBypassHandlers)
2100 {
2101 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2102 cbToTryRead, PGMACCESSORIGIN_IEM);
2103 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2104 { /* likely */ }
2105 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2106 {
2107 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2108 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2109 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2110 }
2111 else
2112 {
2113 Log((RT_SUCCESS(rcStrict)
2114 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2115 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2116 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2117 return rcStrict;
2118 }
2119 }
2120 else
2121 {
2122 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2123 if (RT_SUCCESS(rc))
2124 { /* likely */ }
2125 else
2126 {
2127 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2128 return rc;
2129 }
2130 }
2131 pVCpu->iem.s.cbOpcode += cbToTryRead;
2132 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2133
2134 return VINF_SUCCESS;
2135}
2136
2137#endif /* !IEM_WITH_CODE_TLB */
2138#ifndef IEM_WITH_SETJMP
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2142 *
2143 * @returns Strict VBox status code.
2144 * @param pVCpu The cross context virtual CPU structure of the
2145 * calling thread.
2146 * @param pb Where to return the opcode byte.
2147 */
2148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2149{
2150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2151 if (rcStrict == VINF_SUCCESS)
2152 {
2153 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2154 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2155 pVCpu->iem.s.offOpcode = offOpcode + 1;
2156 }
2157 else
2158 *pb = 0;
2159 return rcStrict;
2160}
2161
2162
2163/**
2164 * Fetches the next opcode byte.
2165 *
2166 * @returns Strict VBox status code.
2167 * @param pVCpu The cross context virtual CPU structure of the
2168 * calling thread.
2169 * @param pu8 Where to return the opcode byte.
2170 */
2171DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2172{
2173 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2174 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2175 {
2176 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2177 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2178 return VINF_SUCCESS;
2179 }
2180 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2181}
2182
2183#else /* IEM_WITH_SETJMP */
2184
2185/**
2186 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2187 *
2188 * @returns The opcode byte.
2189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2190 */
2191DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2192{
2193# ifdef IEM_WITH_CODE_TLB
2194 uint8_t u8;
2195 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2196 return u8;
2197# else
2198 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2199 if (rcStrict == VINF_SUCCESS)
2200 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2201 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2202# endif
2203}
2204
2205
2206/**
2207 * Fetches the next opcode byte, longjmp on error.
2208 *
2209 * @returns The opcode byte.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 */
2212DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2213{
2214# ifdef IEM_WITH_CODE_TLB
2215 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2216 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2217 if (RT_LIKELY( pbBuf != NULL
2218 && offBuf < pVCpu->iem.s.cbInstrBuf))
2219 {
2220 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2221 return pbBuf[offBuf];
2222 }
2223# else
2224 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2225 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2226 {
2227 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2228 return pVCpu->iem.s.abOpcode[offOpcode];
2229 }
2230# endif
2231 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2232}
2233
2234#endif /* IEM_WITH_SETJMP */
2235
2236/**
2237 * Fetches the next opcode byte, returns automatically on failure.
2238 *
2239 * @param a_pu8 Where to return the opcode byte.
2240 * @remark Implicitly references pVCpu.
2241 */
2242#ifndef IEM_WITH_SETJMP
2243# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2244 do \
2245 { \
2246 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2247 if (rcStrict2 == VINF_SUCCESS) \
2248 { /* likely */ } \
2249 else \
2250 return rcStrict2; \
2251 } while (0)
2252#else
2253# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2254#endif /* IEM_WITH_SETJMP */
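/*
 * Illustrative use (a sketch, not from this file): a decoder routine pulls its
 * opcode/immediate bytes via these macros.  In the non-setjmp build the macro
 * returns the failure status from the caller, so it can only be used in
 * functions returning VBOXSTRICTRC; in the setjmp build it longjmps instead.
 *
 *     uint8_t bImm;                      // hypothetical local
 *     IEM_OPCODE_GET_NEXT_U8(&bImm);     // fetches, or bails out on failure
 */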
2255
2256
2257#ifndef IEM_WITH_SETJMP
2258/**
2259 * Fetches the next signed byte from the opcode stream.
2260 *
2261 * @returns Strict VBox status code.
2262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2263 * @param pi8 Where to return the signed byte.
2264 */
2265DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2266{
2267 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2268}
2269#endif /* !IEM_WITH_SETJMP */
2270
2271
2272/**
2273 * Fetches the next signed byte from the opcode stream, returning automatically
2274 * on failure.
2275 *
2276 * @param a_pi8 Where to return the signed byte.
2277 * @remark Implicitly references pVCpu.
2278 */
2279#ifndef IEM_WITH_SETJMP
2280# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2281 do \
2282 { \
2283 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2284 if (rcStrict2 != VINF_SUCCESS) \
2285 return rcStrict2; \
2286 } while (0)
2287#else /* IEM_WITH_SETJMP */
2288# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2289
2290#endif /* IEM_WITH_SETJMP */
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param pu16 Where to return the opcode word.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu16 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 16-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu16 Where to return the unsigned word.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2324
2325 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream, sign-extending it to
2334 * a word, returning automatically on failure.
2335 *
2336 * @param a_pu16 Where to return the word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
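/* For illustration: the sign extension above turns a displacement byte of 0x80
   (-128) into 0xFF80 in the 16-bit destination, while 0x7F (+127) becomes 0x007F. */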
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu32 Where to return the opcode dword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu32 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 32-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu32 Where to return the unsigned dword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2383
2384 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391/**
2392 * Fetches the next signed byte from the opcode stream, sign-extending it to
2393 * a double word, returning automatically on failure.
2394 *
2395 * @param a_pu32 Where to return the double word.
2396 * @remark Implicitly references pVCpu.
2397 */
2398#ifndef IEM_WITH_SETJMP
2399# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2400 do \
2401 { \
2402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2403 if (rcStrict2 != VINF_SUCCESS) \
2404 return rcStrict2; \
2405 } while (0)
2406#else
2407# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2408#endif
2409
2410#ifndef IEM_WITH_SETJMP
2411
2412/**
2413 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2414 *
2415 * @returns Strict VBox status code.
2416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2417 * @param pu64 Where to return the opcode qword.
2418 */
2419DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2420{
2421 uint8_t u8;
2422 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2423 if (rcStrict == VINF_SUCCESS)
2424 *pu64 = (int8_t)u8;
2425 return rcStrict;
2426}
2427
2428
2429/**
2430 * Fetches the next signed byte from the opcode stream, extending it to
2431 * unsigned 64-bit.
2432 *
2433 * @returns Strict VBox status code.
2434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2435 * @param pu64 Where to return the unsigned qword.
2436 */
2437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2438{
2439 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2440 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2441 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2442
2443 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2444 pVCpu->iem.s.offOpcode = offOpcode + 1;
2445 return VINF_SUCCESS;
2446}
2447
2448#endif /* !IEM_WITH_SETJMP */
2449
2450
2451/**
2452 * Fetches the next signed byte from the opcode stream, sign-extending it to
2453 * a quad word, returning automatically on failure.
2454 *
2455 * @param a_pu64 Where to return the quad word.
2456 * @remark Implicitly references pVCpu.
2457 */
2458#ifndef IEM_WITH_SETJMP
2459# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2460 do \
2461 { \
2462 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2463 if (rcStrict2 != VINF_SUCCESS) \
2464 return rcStrict2; \
2465 } while (0)
2466#else
2467# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2468#endif
2469
2470
2471#ifndef IEM_WITH_SETJMP
2472/**
2473 * Fetches the next opcode byte, recording its offset as the ModR/M byte position.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the
2477 * calling thread.
2478 * @param pu8 Where to return the opcode byte.
2479 */
2480DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2481{
2482 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offModRm = offOpcode;
2484 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2485 {
2486 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2487 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2488 return VINF_SUCCESS;
2489 }
2490 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2491}
2492#else /* IEM_WITH_SETJMP */
2493/**
2494 * Fetches the next opcode byte, longjmp on error.
2495 * Fetches the next opcode byte, recording its offset as the ModR/M byte position; longjmp on error.
2496 * @returns The opcode byte.
2497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2498 */
2499DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2500{
2501# ifdef IEM_WITH_CODE_TLB
2502 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2503 pVCpu->iem.s.offModRm = offBuf;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf < pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2509 return pbBuf[offBuf];
2510 }
2511# else
2512 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offModRm = offOpcode;
2514 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2515 {
2516 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2517 return pVCpu->iem.s.abOpcode[offOpcode];
2518 }
2519# endif
2520 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2521}
2522#endif /* IEM_WITH_SETJMP */
2523
2524/**
2525 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2526 * on failure.
2527 *
2528 * Will note down the position of the ModR/M byte for VT-x exits.
2529 *
2530 * @param a_pbRm Where to return the RM opcode byte.
2531 * @remark Implicitly references pVCpu.
2532 */
2533#ifndef IEM_WITH_SETJMP
2534# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2535 do \
2536 { \
2537 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2538 if (rcStrict2 == VINF_SUCCESS) \
2539 { /* likely */ } \
2540 else \
2541 return rcStrict2; \
2542 } while (0)
2543#else
2544# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2545#endif /* IEM_WITH_SETJMP */
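/*
 * Illustrative use (a sketch, not from this file): decoders fetch the ModR/M
 * byte through this macro rather than IEM_OPCODE_GET_NEXT_U8 so that
 * pVCpu->iem.s.offModRm records where the ModR/M byte sits for VT-x exit
 * handling:
 *
 *     uint8_t bRm;                       // hypothetical local
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);
 */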
2546
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu16 Where to return the opcode word.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567# endif
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu16 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu16 Where to return the opcode word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2584{
2585 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2587 {
2588 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2589# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2590 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2591# else
2592 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593# endif
2594 return VINF_SUCCESS;
2595 }
2596 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2597}
2598
2599#else /* IEM_WITH_SETJMP */
2600
2601/**
2602 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2603 *
2604 * @returns The opcode word.
2605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2606 */
2607DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2608{
2609# ifdef IEM_WITH_CODE_TLB
2610 uint16_t u16;
2611 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2612 return u16;
2613# else
2614 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2615 if (rcStrict == VINF_SUCCESS)
2616 {
2617 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2618 pVCpu->iem.s.offOpcode += 2;
2619# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2620 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2621# else
2622 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2623# endif
2624 }
2625 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2626# endif
2627}
2628
2629
2630/**
2631 * Fetches the next opcode word, longjmp on error.
2632 *
2633 * @returns The opcode word.
2634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2635 */
2636DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2637{
2638# ifdef IEM_WITH_CODE_TLB
2639 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2640 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2641 if (RT_LIKELY( pbBuf != NULL
2642 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2643 {
2644 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2645# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2646 return *(uint16_t const *)&pbBuf[offBuf];
2647# else
2648 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2649# endif
2650 }
2651# else
2652 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2654 {
2655 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2657 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2658# else
2659 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2660# endif
2661 }
2662# endif
2663 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2664}
2665
2666#endif /* IEM_WITH_SETJMP */
2667
2668
2669/**
2670 * Fetches the next opcode word, returns automatically on failure.
2671 *
2672 * @param a_pu16 Where to return the opcode word.
2673 * @remark Implicitly references pVCpu.
2674 */
2675#ifndef IEM_WITH_SETJMP
2676# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2677 do \
2678 { \
2679 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2680 if (rcStrict2 != VINF_SUCCESS) \
2681 return rcStrict2; \
2682 } while (0)
2683#else
2684# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2685#endif
2686
2687#ifndef IEM_WITH_SETJMP
2688
2689/**
2690 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2691 *
2692 * @returns Strict VBox status code.
2693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2694 * @param pu32 Where to return the opcode double word.
2695 */
2696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2697{
2698 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2699 if (rcStrict == VINF_SUCCESS)
2700 {
2701 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2702 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2703 pVCpu->iem.s.offOpcode = offOpcode + 2;
2704 }
2705 else
2706 *pu32 = 0;
2707 return rcStrict;
2708}
2709
2710
2711/**
2712 * Fetches the next opcode word, zero extending it to a double word.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pu32 Where to return the opcode double word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2719{
2720 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2721 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2722 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2723
2724 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2725 pVCpu->iem.s.offOpcode = offOpcode + 2;
2726 return VINF_SUCCESS;
2727}
2728
2729#endif /* !IEM_WITH_SETJMP */
2730
2731
2732/**
2733 * Fetches the next opcode word and zero extends it to a double word, returns
2734 * automatically on failure.
2735 *
2736 * @param a_pu32 Where to return the opcode double word.
2737 * @remark Implicitly references pVCpu.
2738 */
2739#ifndef IEM_WITH_SETJMP
2740# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2741 do \
2742 { \
2743 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2744 if (rcStrict2 != VINF_SUCCESS) \
2745 return rcStrict2; \
2746 } while (0)
2747#else
2748# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2749#endif
2750
2751#ifndef IEM_WITH_SETJMP
2752
2753/**
2754 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2755 *
2756 * @returns Strict VBox status code.
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 * @param pu64 Where to return the opcode quad word.
2759 */
2760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2761{
2762 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2763 if (rcStrict == VINF_SUCCESS)
2764 {
2765 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2766 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2767 pVCpu->iem.s.offOpcode = offOpcode + 2;
2768 }
2769 else
2770 *pu64 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode word, zero extending it to a quad word.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu64 Where to return the opcode quad word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2783{
2784 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2786 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2787
2788 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2789 pVCpu->iem.s.offOpcode = offOpcode + 2;
2790 return VINF_SUCCESS;
2791}
2792
2793#endif /* !IEM_WITH_SETJMP */
2794
2795/**
2796 * Fetches the next opcode word and zero extends it to a quad word, returns
2797 * automatically on failure.
2798 *
2799 * @param a_pu64 Where to return the opcode quad word.
2800 * @remark Implicitly references pVCpu.
2801 */
2802#ifndef IEM_WITH_SETJMP
2803# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2804 do \
2805 { \
2806 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2807 if (rcStrict2 != VINF_SUCCESS) \
2808 return rcStrict2; \
2809 } while (0)
2810#else
2811# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2812#endif
2813
2814
2815#ifndef IEM_WITH_SETJMP
2816/**
2817 * Fetches the next signed word from the opcode stream.
2818 *
2819 * @returns Strict VBox status code.
2820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2821 * @param pi16 Where to return the signed word.
2822 */
2823DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2824{
2825 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2826}
2827#endif /* !IEM_WITH_SETJMP */
2828
2829
2830/**
2831 * Fetches the next signed word from the opcode stream, returning automatically
2832 * on failure.
2833 *
2834 * @param a_pi16 Where to return the signed word.
2835 * @remark Implicitly references pVCpu.
2836 */
2837#ifndef IEM_WITH_SETJMP
2838# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2839 do \
2840 { \
2841 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2842 if (rcStrict2 != VINF_SUCCESS) \
2843 return rcStrict2; \
2844 } while (0)
2845#else
2846# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2847#endif
2848
2849#ifndef IEM_WITH_SETJMP
2850
2851/**
2852 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2853 *
2854 * @returns Strict VBox status code.
2855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2856 * @param pu32 Where to return the opcode dword.
2857 */
2858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2859{
2860 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2861 if (rcStrict == VINF_SUCCESS)
2862 {
2863 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 pVCpu->iem.s.offOpcode = offOpcode + 4;
2873 }
2874 else
2875 *pu32 = 0;
2876 return rcStrict;
2877}
2878
2879
2880/**
2881 * Fetches the next opcode dword.
2882 *
2883 * @returns Strict VBox status code.
2884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2885 * @param pu32 Where to return the opcode double word.
2886 */
2887DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2888{
2889 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2890 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2891 {
2892 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2893# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2894 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2895# else
2896 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900# endif
2901 return VINF_SUCCESS;
2902 }
2903 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2904}
2905
2906#else /* IEM_WITH_SETJMP */
2907
2908/**
2909 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2910 *
2911 * @returns The opcode dword.
2912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2913 */
2914DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2915{
2916# ifdef IEM_WITH_CODE_TLB
2917 uint32_t u32;
2918 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2919 return u32;
2920# else
2921 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2922 if (rcStrict == VINF_SUCCESS)
2923 {
2924 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2925 pVCpu->iem.s.offOpcode = offOpcode + 4;
2926# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2927 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2928# else
2929 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2930 pVCpu->iem.s.abOpcode[offOpcode + 1],
2931 pVCpu->iem.s.abOpcode[offOpcode + 2],
2932 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2933# endif
2934 }
2935 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2936# endif
2937}
2938
2939
2940/**
2941 * Fetches the next opcode dword, longjmp on error.
2942 *
2943 * @returns The opcode dword.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 */
2946DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2947{
2948# ifdef IEM_WITH_CODE_TLB
2949 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2950 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2951 if (RT_LIKELY( pbBuf != NULL
2952 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2953 {
2954 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2955# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2956 return *(uint32_t const *)&pbBuf[offBuf];
2957# else
2958 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2959 pbBuf[offBuf + 1],
2960 pbBuf[offBuf + 2],
2961 pbBuf[offBuf + 3]);
2962# endif
2963 }
2964# else
2965 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2966 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2967 {
2968 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2969# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2970 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2971# else
2972 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2973 pVCpu->iem.s.abOpcode[offOpcode + 1],
2974 pVCpu->iem.s.abOpcode[offOpcode + 2],
2975 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2976# endif
2977 }
2978# endif
2979 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2980}
2981
2982#endif /* IEM_WITH_SETJMP */
2983
2984
2985/**
2986 * Fetches the next opcode dword, returns automatically on failure.
2987 *
2988 * @param a_pu32 Where to return the opcode dword.
2989 * @remark Implicitly references pVCpu.
2990 */
2991#ifndef IEM_WITH_SETJMP
2992# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2993 do \
2994 { \
2995 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2996 if (rcStrict2 != VINF_SUCCESS) \
2997 return rcStrict2; \
2998 } while (0)
2999#else
3000# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3001#endif
3002
3003#ifndef IEM_WITH_SETJMP
3004
3005/**
3006 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3007 *
3008 * @returns Strict VBox status code.
3009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3010 * @param pu64 Where to return the opcode quad word.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3013{
3014 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3015 if (rcStrict == VINF_SUCCESS)
3016 {
3017 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3018 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3019 pVCpu->iem.s.abOpcode[offOpcode + 1],
3020 pVCpu->iem.s.abOpcode[offOpcode + 2],
3021 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3022 pVCpu->iem.s.offOpcode = offOpcode + 4;
3023 }
3024 else
3025 *pu64 = 0;
3026 return rcStrict;
3027}
3028
3029
3030/**
3031 * Fetches the next opcode dword, zero extending it to a quad word.
3032 *
3033 * @returns Strict VBox status code.
3034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3035 * @param pu64 Where to return the opcode quad word.
3036 */
3037DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3038{
3039 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3040 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3041 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3042
3043 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3044 pVCpu->iem.s.abOpcode[offOpcode + 1],
3045 pVCpu->iem.s.abOpcode[offOpcode + 2],
3046 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode dword and zero extends it to a quad word, returns
3056 * automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
3072
3073
3074#ifndef IEM_WITH_SETJMP
3075/**
3076 * Fetches the next signed double word from the opcode stream.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pi32 Where to return the signed double word.
3081 */
3082DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3083{
3084 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3085}
3086#endif
3087
3088/**
3089 * Fetches the next signed double word from the opcode stream, returning
3090 * automatically on failure.
3091 *
3092 * @param a_pi32 Where to return the signed double word.
3093 * @remark Implicitly references pVCpu.
3094 */
3095#ifndef IEM_WITH_SETJMP
3096# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3097 do \
3098 { \
3099 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3100 if (rcStrict2 != VINF_SUCCESS) \
3101 return rcStrict2; \
3102 } while (0)
3103#else
3104# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3105#endif
3106
3107#ifndef IEM_WITH_SETJMP
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3111 *
3112 * @returns Strict VBox status code.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 * @param pu64 Where to return the opcode qword.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3117{
3118 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3119 if (rcStrict == VINF_SUCCESS)
3120 {
3121 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3122 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3123 pVCpu->iem.s.abOpcode[offOpcode + 1],
3124 pVCpu->iem.s.abOpcode[offOpcode + 2],
3125 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3126 pVCpu->iem.s.offOpcode = offOpcode + 4;
3127 }
3128 else
3129 *pu64 = 0;
3130 return rcStrict;
3131}
3132
3133
3134/**
3135 * Fetches the next opcode dword, sign extending it into a quad word.
3136 *
3137 * @returns Strict VBox status code.
3138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3139 * @param pu64 Where to return the opcode quad word.
3140 */
3141DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3142{
3143 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3144 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3145 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3146
3147 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3151 *pu64 = i32;
3152 pVCpu->iem.s.offOpcode = offOpcode + 4;
3153 return VINF_SUCCESS;
3154}
3155
3156#endif /* !IEM_WITH_SETJMP */
3157
3158
3159/**
3160 * Fetches the next opcode double word and sign extends it to a quad word,
3161 * returns automatically on failure.
3162 *
3163 * @param a_pu64 Where to return the opcode quad word.
3164 * @remark Implicitly references pVCpu.
3165 */
3166#ifndef IEM_WITH_SETJMP
3167# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3168 do \
3169 { \
3170 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3171 if (rcStrict2 != VINF_SUCCESS) \
3172 return rcStrict2; \
3173 } while (0)
3174#else
3175# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3176#endif
3177
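/*
 * Worked example (illustration, not from the original source): the difference
 * between the zero- and sign-extending dword fetches above for the opcode
 * dword 0x80000000:
 *     IEM_OPCODE_GET_NEXT_U32_ZX_U64  ->  0x0000000080000000
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64  ->  0xffffffff80000000
 * A dword with bit 31 clear, e.g. 0x7fffffff, yields the same 64-bit value
 * from both.
 */
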
3178#ifndef IEM_WITH_SETJMP
3179
3180/**
3181 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3182 *
3183 * @returns Strict VBox status code.
3184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3185 * @param pu64 Where to return the opcode qword.
3186 */
3187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3188{
3189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3190 if (rcStrict == VINF_SUCCESS)
3191 {
3192 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3193# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3194 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3195# else
3196 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3197 pVCpu->iem.s.abOpcode[offOpcode + 1],
3198 pVCpu->iem.s.abOpcode[offOpcode + 2],
3199 pVCpu->iem.s.abOpcode[offOpcode + 3],
3200 pVCpu->iem.s.abOpcode[offOpcode + 4],
3201 pVCpu->iem.s.abOpcode[offOpcode + 5],
3202 pVCpu->iem.s.abOpcode[offOpcode + 6],
3203 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3204# endif
3205 pVCpu->iem.s.offOpcode = offOpcode + 8;
3206 }
3207 else
3208 *pu64 = 0;
3209 return rcStrict;
3210}
3211
3212
3213/**
3214 * Fetches the next opcode qword.
3215 *
3216 * @returns Strict VBox status code.
3217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3218 * @param pu64 Where to return the opcode qword.
3219 */
3220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3221{
3222 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3223 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3224 {
3225# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3226 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3227# else
3228 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3229 pVCpu->iem.s.abOpcode[offOpcode + 1],
3230 pVCpu->iem.s.abOpcode[offOpcode + 2],
3231 pVCpu->iem.s.abOpcode[offOpcode + 3],
3232 pVCpu->iem.s.abOpcode[offOpcode + 4],
3233 pVCpu->iem.s.abOpcode[offOpcode + 5],
3234 pVCpu->iem.s.abOpcode[offOpcode + 6],
3235 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3236# endif
3237 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3238 return VINF_SUCCESS;
3239 }
3240 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3241}
3242
3243#else /* IEM_WITH_SETJMP */
3244
3245/**
3246 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3247 *
3248 * @returns The opcode qword.
3249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3250 */
3251DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3252{
3253# ifdef IEM_WITH_CODE_TLB
3254 uint64_t u64;
3255 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3256 return u64;
3257# else
3258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3259 if (rcStrict == VINF_SUCCESS)
3260 {
3261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3262 pVCpu->iem.s.offOpcode = offOpcode + 8;
3263# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3264 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3265# else
3266 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3267 pVCpu->iem.s.abOpcode[offOpcode + 1],
3268 pVCpu->iem.s.abOpcode[offOpcode + 2],
3269 pVCpu->iem.s.abOpcode[offOpcode + 3],
3270 pVCpu->iem.s.abOpcode[offOpcode + 4],
3271 pVCpu->iem.s.abOpcode[offOpcode + 5],
3272 pVCpu->iem.s.abOpcode[offOpcode + 6],
3273 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3274# endif
3275 }
3276 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3277# endif
3278}
3279
3280
3281/**
3282 * Fetches the next opcode qword, longjmp on error.
3283 *
3284 * @returns The opcode qword.
3285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3286 */
3287DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3288{
3289# ifdef IEM_WITH_CODE_TLB
3290 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3291 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3292 if (RT_LIKELY( pbBuf != NULL
3293 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3294 {
3295 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3296# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3297 return *(uint64_t const *)&pbBuf[offBuf];
3298# else
3299 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3300 pbBuf[offBuf + 1],
3301 pbBuf[offBuf + 2],
3302 pbBuf[offBuf + 3],
3303 pbBuf[offBuf + 4],
3304 pbBuf[offBuf + 5],
3305 pbBuf[offBuf + 6],
3306 pbBuf[offBuf + 7]);
3307# endif
3308 }
3309# else
3310 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3311 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3312 {
3313 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3314# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3315 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3316# else
3317 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3318 pVCpu->iem.s.abOpcode[offOpcode + 1],
3319 pVCpu->iem.s.abOpcode[offOpcode + 2],
3320 pVCpu->iem.s.abOpcode[offOpcode + 3],
3321 pVCpu->iem.s.abOpcode[offOpcode + 4],
3322 pVCpu->iem.s.abOpcode[offOpcode + 5],
3323 pVCpu->iem.s.abOpcode[offOpcode + 6],
3324 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3325# endif
3326 }
3327# endif
3328 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3329}
3330
3331#endif /* IEM_WITH_SETJMP */
3332
3333/**
3334 * Fetches the next opcode quad word, returns automatically on failure.
3335 *
3336 * @param a_pu64 Where to return the opcode quad word.
3337 * @remark Implicitly references pVCpu.
3338 */
3339#ifndef IEM_WITH_SETJMP
3340# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3341 do \
3342 { \
3343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3344 if (rcStrict2 != VINF_SUCCESS) \
3345 return rcStrict2; \
3346 } while (0)
3347#else
3348# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3349#endif
3350
3351
3352/** @name Misc Worker Functions.
3353 * @{
3354 */
3355
3356/**
3357 * Gets the exception class for the specified exception vector.
3358 *
3359 * @returns The class of the specified exception.
3360 * @param uVector The exception vector.
3361 */
3362IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3363{
3364 Assert(uVector <= X86_XCPT_LAST);
3365 switch (uVector)
3366 {
3367 case X86_XCPT_DE:
3368 case X86_XCPT_TS:
3369 case X86_XCPT_NP:
3370 case X86_XCPT_SS:
3371 case X86_XCPT_GP:
3372 case X86_XCPT_SX: /* AMD only */
3373 return IEMXCPTCLASS_CONTRIBUTORY;
3374
3375 case X86_XCPT_PF:
3376 case X86_XCPT_VE: /* Intel only */
3377 return IEMXCPTCLASS_PAGE_FAULT;
3378
3379 case X86_XCPT_DF:
3380 return IEMXCPTCLASS_DOUBLE_FAULT;
3381 }
3382 return IEMXCPTCLASS_BENIGN;
3383}
3384
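/*
 * Examples of the classification above: #GP (13), #TS (10), #SS (12) and #NP (11)
 * are contributory; #PF (14) is in the page-fault class; #DF (8) is the double
 * fault class; vectors not listed in the switch, e.g. #UD (6) or #NM (7), are
 * benign.
 */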
3385
3386/**
3387 * Evaluates how to handle an exception caused during delivery of another event
3388 * (exception / interrupt).
3389 *
3390 * @returns How to handle the recursive exception.
3391 * @param pVCpu The cross context virtual CPU structure of the
3392 * calling thread.
3393 * @param fPrevFlags The flags of the previous event.
3394 * @param uPrevVector The vector of the previous event.
3395 * @param fCurFlags The flags of the current exception.
3396 * @param uCurVector The vector of the current exception.
3397 * @param pfXcptRaiseInfo Where to store additional information about the
3398 * exception condition. Optional.
3399 */
3400VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3401 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3402{
3403 /*
3404     * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3405     * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3406 */
3407 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3408 Assert(pVCpu); RT_NOREF(pVCpu);
3409 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3410
3411 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3412 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3413 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3414 {
3415 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3416 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3417 {
3418 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3419 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3420 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3421 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3422 {
3423 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3424 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3425 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3426 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3427 uCurVector, pVCpu->cpum.GstCtx.cr2));
3428 }
3429 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3430 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3431 {
3432 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3433 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3434 }
3435 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3436 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3437 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3438 {
3439 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3440 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3441 }
3442 }
3443 else
3444 {
3445 if (uPrevVector == X86_XCPT_NMI)
3446 {
3447 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3448 if (uCurVector == X86_XCPT_PF)
3449 {
3450 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3451 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3452 }
3453 }
3454 else if ( uPrevVector == X86_XCPT_AC
3455 && uCurVector == X86_XCPT_AC)
3456 {
3457 enmRaise = IEMXCPTRAISE_CPU_HANG;
3458 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3459 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3460 }
3461 }
3462 }
3463 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3464 {
3465 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3466 if (uCurVector == X86_XCPT_PF)
3467 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3468 }
3469 else
3470 {
3471 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3472 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3473 }
3474
3475 if (pfXcptRaiseInfo)
3476 *pfXcptRaiseInfo = fRaiseInfo;
3477 return enmRaise;
3478}
3479
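/*
 * Minimal usage sketch (illustration only, assumes pVCpu is in scope): a #GP
 * raised while delivering another #GP combines two contributory exceptions and
 * therefore yields a double fault, whereas a #PF raised while delivering a #GP
 * is handled serially as the current exception.
 */
#if 0 /* example only, never compiled */
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, X86_XCPT_GP,
                                                     &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
#endif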
3480
3481/**
3482 * Enters the CPU shutdown state initiated by a triple fault or other
3483 * unrecoverable conditions.
3484 *
3485 * @returns Strict VBox status code.
3486 * @param pVCpu The cross context virtual CPU structure of the
3487 * calling thread.
3488 */
3489IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3490{
3491 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3492 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3493
3494 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3495 {
3496 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3497 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3498 }
3499
3500 RT_NOREF(pVCpu);
3501 return VINF_EM_TRIPLE_FAULT;
3502}
3503
3504
3505/**
3506 * Validates a new SS segment.
3507 *
3508 * @returns VBox strict status code.
3509 * @param pVCpu The cross context virtual CPU structure of the
3510 * calling thread.
3511 * @param NewSS             The new SS selector.
3512 * @param uCpl The CPL to load the stack for.
3513 * @param pDesc Where to return the descriptor.
3514 */
3515IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3516{
3517 /* Null selectors are not allowed (we're not called for dispatching
3518 interrupts with SS=0 in long mode). */
3519 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3520 {
3521 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3522 return iemRaiseTaskSwitchFault0(pVCpu);
3523 }
3524
3525 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3526 if ((NewSS & X86_SEL_RPL) != uCpl)
3527 {
3528        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3529 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3530 }
3531
3532 /*
3533 * Read the descriptor.
3534 */
3535 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3536 if (rcStrict != VINF_SUCCESS)
3537 return rcStrict;
3538
3539 /*
3540 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3541 */
3542 if (!pDesc->Legacy.Gen.u1DescType)
3543 {
3544 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3545 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3546 }
3547
3548 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3549 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3550 {
3551 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3552 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3553 }
3554 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3555 {
3556        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3557 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3558 }
3559
3560 /* Is it there? */
3561 /** @todo testcase: Is this checked before the canonical / limit check below? */
3562 if (!pDesc->Legacy.Gen.u1Present)
3563 {
3564 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3565 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3566 }
3567
3568 return VINF_SUCCESS;
3569}
3570
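/*
 * Check order illustration for the validation above: a null SS raises #TS(0);
 * an RPL/CPL mismatch, a system or non-writable segment, or a DPL/CPL mismatch
 * raises #TS with the selector as error code; only the final presence check
 * raises #NP instead.
 */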
3571
3572/**
3573 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3574 * not.
3575 *
3576 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3577 */
3578#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3579# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3580#else
3581# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3582#endif
3583
3584/**
3585 * Updates the EFLAGS in the correct manner wrt. PATM.
3586 *
3587 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3588 * @param a_fEfl The new EFLAGS.
3589 */
3590#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3591# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3592#else
3593# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3594#endif
3595
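/*
 * Usage note (illustrative): code that needs to update flags reads them through
 * IEMMISC_GET_EFL, modifies the value and writes it back with IEMMISC_SET_EFL,
 * e.g. the real-mode event delivery below does:
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 * keeping the raw-mode (PATM) special handling confined to these two macros.
 */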
3596
3597/** @} */
3598
3599/** @name Raising Exceptions.
3600 *
3601 * @{
3602 */
3603
3604
3605/**
3606 * Loads the specified stack far pointer from the TSS.
3607 *
3608 * @returns VBox strict status code.
3609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3610 * @param uCpl The CPL to load the stack for.
3611 * @param pSelSS Where to return the new stack segment.
3612 * @param puEsp Where to return the new stack pointer.
3613 */
3614IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3615{
3616 VBOXSTRICTRC rcStrict;
3617 Assert(uCpl < 4);
3618
3619 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3620 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3621 {
3622 /*
3623 * 16-bit TSS (X86TSS16).
3624 */
3625 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3626 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3627 {
3628 uint32_t off = uCpl * 4 + 2;
3629 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3630 {
3631 /** @todo check actual access pattern here. */
3632 uint32_t u32Tmp = 0; /* gcc maybe... */
3633 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3634 if (rcStrict == VINF_SUCCESS)
3635 {
3636 *puEsp = RT_LOWORD(u32Tmp);
3637 *pSelSS = RT_HIWORD(u32Tmp);
3638 return VINF_SUCCESS;
3639 }
3640 }
3641 else
3642 {
3643 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3644 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3645 }
3646 break;
3647 }
3648
3649 /*
3650 * 32-bit TSS (X86TSS32).
3651 */
3652 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3653 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3654 {
3655 uint32_t off = uCpl * 8 + 4;
3656 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3657 {
3658/** @todo check actual access pattern here. */
3659 uint64_t u64Tmp;
3660 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3661 if (rcStrict == VINF_SUCCESS)
3662 {
3663 *puEsp = u64Tmp & UINT32_MAX;
3664 *pSelSS = (RTSEL)(u64Tmp >> 32);
3665 return VINF_SUCCESS;
3666 }
3667 }
3668 else
3669 {
3670                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3671 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3672 }
3673 break;
3674 }
3675
3676 default:
3677 AssertFailed();
3678 rcStrict = VERR_IEM_IPE_4;
3679 break;
3680 }
3681
3682 *puEsp = 0; /* make gcc happy */
3683 *pSelSS = 0; /* make gcc happy */
3684 return rcStrict;
3685}
3686
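/*
 * Offset illustration for the TSS lookups above (bytes from the TSS base):
 *     16-bit TSS: off = uCpl * 4 + 2  ->  CPL0 SP0 at 2 / SS0 at 4, CPL1 SP1 at 6 / SS1 at 8, ...
 *     32-bit TSS: off = uCpl * 8 + 4  ->  CPL0 ESP0 at 4 / SS0 at 8, CPL1 ESP1 at 12 / SS1 at 16, ...
 * The 16-bit case reads SS:SP as one dword (SP in the low word, SS in the high
 * word); the 32-bit case reads one qword with ESP in the low dword and SS in
 * bits 32..47.
 */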
3687
3688/**
3689 * Loads the specified stack pointer from the 64-bit TSS.
3690 *
3691 * @returns VBox strict status code.
3692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3693 * @param uCpl The CPL to load the stack for.
3694 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3695 * @param puRsp Where to return the new stack pointer.
3696 */
3697IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3698{
3699 Assert(uCpl < 4);
3700 Assert(uIst < 8);
3701 *puRsp = 0; /* make gcc happy */
3702
3703 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3704 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3705
3706 uint32_t off;
3707 if (uIst)
3708 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3709 else
3710 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3711 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3712 {
3713 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3714 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3715 }
3716
3717 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3718}
3719
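/*
 * Offset illustration for the 64-bit TSS lookup above: with uIst == 0 the code
 * reads rsp0/rsp1/rsp2 at offsets 4/12/20 for CPL 0/1/2; a non-zero IST index
 * reads ist1..ist7 at offsets 36..84, i.e. (uIst - 1) * 8 + 36.
 */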
3720
3721/**
3722 * Adjust the CPU state according to the exception being raised.
3723 *
3724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3725 * @param u8Vector The exception that has been raised.
3726 */
3727DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3728{
3729 switch (u8Vector)
3730 {
3731 case X86_XCPT_DB:
3732 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3733 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3734 break;
3735 /** @todo Read the AMD and Intel exception reference... */
3736 }
3737}
3738
3739
3740/**
3741 * Implements exceptions and interrupts for real mode.
3742 *
3743 * @returns VBox strict status code.
3744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3745 * @param cbInstr The number of bytes to offset rIP by in the return
3746 * address.
3747 * @param u8Vector The interrupt / exception vector number.
3748 * @param fFlags The flags.
3749 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3750 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3751 */
3752IEM_STATIC VBOXSTRICTRC
3753iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3754 uint8_t cbInstr,
3755 uint8_t u8Vector,
3756 uint32_t fFlags,
3757 uint16_t uErr,
3758 uint64_t uCr2)
3759{
3760 NOREF(uErr); NOREF(uCr2);
3761 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3762
3763 /*
3764 * Read the IDT entry.
3765 */
3766 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3767 {
3768 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3769 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3770 }
3771 RTFAR16 Idte;
3772 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3774 {
3775 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3776 return rcStrict;
3777 }
3778
3779 /*
3780 * Push the stack frame.
3781 */
3782 uint16_t *pu16Frame;
3783 uint64_t uNewRsp;
3784 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3789#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3790 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3791 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3792 fEfl |= UINT16_C(0xf000);
3793#endif
3794 pu16Frame[2] = (uint16_t)fEfl;
3795 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3796 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3797 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3798 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3799 return rcStrict;
3800
3801 /*
3802 * Load the vector address into cs:ip and make exception specific state
3803 * adjustments.
3804 */
3805 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3806 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3807 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3808 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3809 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3810 pVCpu->cpum.GstCtx.rip = Idte.off;
3811 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3812 IEMMISC_SET_EFL(pVCpu, fEfl);
3813
3814 /** @todo do we actually do this in real mode? */
3815 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3816 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3817
3818 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3819}
3820
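/*
 * Worked example for the real-mode path above: for vector 0x21 the IVT slot is
 * at IDT base + 4 * 0x21 = base + 0x84 and holds a far pointer (offset in the
 * low word, segment in the high word).  The handler sees a 6-byte frame pushed
 * as FLAGS, CS, IP (IP at the lowest address, where SP points) and runs with
 * IF, TF and AC cleared.
 */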
3821
3822/**
3823 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3824 *
3825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3826 * @param pSReg Pointer to the segment register.
3827 */
3828IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3829{
3830 pSReg->Sel = 0;
3831 pSReg->ValidSel = 0;
3832 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3833 {
3834        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3835 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3836 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3837 }
3838 else
3839 {
3840 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3841 /** @todo check this on AMD-V */
3842 pSReg->u64Base = 0;
3843 pSReg->u32Limit = 0;
3844 }
3845}
3846
3847
3848/**
3849 * Loads a segment selector during a task switch in V8086 mode.
3850 *
3851 * @param pSReg Pointer to the segment register.
3852 * @param uSel The selector value to load.
3853 */
3854IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3855{
3856 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3857 pSReg->Sel = uSel;
3858 pSReg->ValidSel = uSel;
3859 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3860 pSReg->u64Base = uSel << 4;
3861 pSReg->u32Limit = 0xffff;
3862 pSReg->Attr.u = 0xf3;
3863}
3864
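/*
 * Example of the v8086 mapping above: loading selector 0x1234 yields a base of
 * 0x1234 << 4 = 0x12340, a fixed 64KB limit (0xffff) and attribute byte 0xf3
 * (present, DPL 3, accessed read/write data), matching the checks in Intel
 * spec. 26.3.1.2 "Checks on Guest Segment Registers".
 */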
3865
3866/**
3867 * Loads a NULL data selector into a selector register, both the hidden and
3868 * visible parts, in protected mode.
3869 *
3870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3871 * @param pSReg Pointer to the segment register.
3872 * @param uRpl The RPL.
3873 */
3874IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3875{
3876    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3877 * data selector in protected mode. */
3878 pSReg->Sel = uRpl;
3879 pSReg->ValidSel = uRpl;
3880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3882 {
3883 /* VT-x (Intel 3960x) observed doing something like this. */
3884 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3885 pSReg->u32Limit = UINT32_MAX;
3886 pSReg->u64Base = 0;
3887 }
3888 else
3889 {
3890 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3891 pSReg->u32Limit = 0;
3892 pSReg->u64Base = 0;
3893 }
3894}
3895
3896
3897/**
3898 * Loads a segment selector during a task switch in protected mode.
3899 *
3900 * In this task switch scenario, we would throw \#TS exceptions rather than
3901 * \#GPs.
3902 *
3903 * @returns VBox strict status code.
3904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3905 * @param pSReg Pointer to the segment register.
3906 * @param uSel The new selector value.
3907 *
3908 * @remarks This does _not_ handle CS or SS.
3909 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3910 */
3911IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3912{
3913 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3914
3915 /* Null data selector. */
3916 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3917 {
3918 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3920 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3921 return VINF_SUCCESS;
3922 }
3923
3924 /* Fetch the descriptor. */
3925 IEMSELDESC Desc;
3926 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3927 if (rcStrict != VINF_SUCCESS)
3928 {
3929 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3930 VBOXSTRICTRC_VAL(rcStrict)));
3931 return rcStrict;
3932 }
3933
3934 /* Must be a data segment or readable code segment. */
3935 if ( !Desc.Legacy.Gen.u1DescType
3936 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3937 {
3938 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3939 Desc.Legacy.Gen.u4Type));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /* Check privileges for data segments and non-conforming code segments. */
3944 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3945 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3946 {
3947 /* The RPL and the new CPL must be less than or equal to the DPL. */
3948 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3949 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3950 {
3951 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3952 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3953 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3954 }
3955 }
3956
3957 /* Is it there? */
3958 if (!Desc.Legacy.Gen.u1Present)
3959 {
3960 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3961 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3962 }
3963
3964 /* The base and limit. */
3965 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3966 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3967
3968 /*
3969 * Ok, everything checked out fine. Now set the accessed bit before
3970 * committing the result into the registers.
3971 */
3972 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3973 {
3974 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3975 if (rcStrict != VINF_SUCCESS)
3976 return rcStrict;
3977 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3978 }
3979
3980 /* Commit */
3981 pSReg->Sel = uSel;
3982 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3983 pSReg->u32Limit = cbLimit;
3984 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3985 pSReg->ValidSel = uSel;
3986 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3987 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3988 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3989
3990 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3991 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3992 return VINF_SUCCESS;
3993}
3994
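/*
 * Example of the privilege check above: with CPL 0, loading a data selector
 * whose RPL is 3 against a descriptor with DPL 2 raises #TS, because the RPL
 * (3) exceeds the DPL (2) even though the CPL does not.  Conforming code
 * segments skip this check entirely.
 */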
3995
3996/**
3997 * Performs a task switch.
3998 *
3999 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4000 * caller is responsible for performing the necessary checks (like DPL, TSS
4001 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4002 * reference for JMP, CALL, IRET.
4003 *
4004 * If the task switch is due to a software interrupt or hardware exception,
4005 * the caller is responsible for validating the TSS selector and descriptor. See
4006 * Intel Instruction reference for INT n.
4007 *
4008 * @returns VBox strict status code.
4009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4010 * @param enmTaskSwitch The cause of the task switch.
4011 * @param uNextEip The EIP effective after the task switch.
4012 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4013 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4014 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4015 * @param SelTSS The TSS selector of the new task.
4016 * @param pNewDescTSS Pointer to the new TSS descriptor.
4017 */
4018IEM_STATIC VBOXSTRICTRC
4019iemTaskSwitch(PVMCPU pVCpu,
4020 IEMTASKSWITCH enmTaskSwitch,
4021 uint32_t uNextEip,
4022 uint32_t fFlags,
4023 uint16_t uErr,
4024 uint64_t uCr2,
4025 RTSEL SelTSS,
4026 PIEMSELDESC pNewDescTSS)
4027{
4028 Assert(!IEM_IS_REAL_MODE(pVCpu));
4029 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4030 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4031
4032 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4034 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4035 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4036 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4037
4038 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4039 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4040
4041 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4042 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4043
4044 /* Update CR2 in case it's a page-fault. */
4045 /** @todo This should probably be done much earlier in IEM/PGM. See
4046 * @bugref{5653#c49}. */
4047 if (fFlags & IEM_XCPT_FLAGS_CR2)
4048 pVCpu->cpum.GstCtx.cr2 = uCr2;
4049
4050 /*
4051 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4052 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4053 */
4054 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4055 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4056 if (uNewTSSLimit < uNewTSSLimitMin)
4057 {
4058 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4059 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4060 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4061 }
4062
4063 /*
4064     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4065 * The new TSS must have been read and validated (DPL, limits etc.) before a
4066 * task-switch VM-exit commences.
4067 *
4068     * See Intel spec. 25.4.2 "Treatment of Task Switches".
4069 */
4070 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4071 {
4072 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4073 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4074 }
4075
4076 /*
4077 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4078 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4079 */
4080 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4081 {
4082 uint32_t const uExitInfo1 = SelTSS;
4083 uint32_t uExitInfo2 = uErr;
4084 switch (enmTaskSwitch)
4085 {
4086 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4087 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4088 default: break;
4089 }
4090 if (fFlags & IEM_XCPT_FLAGS_ERR)
4091 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4092 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4093 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4094
4095 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4096 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4097 RT_NOREF2(uExitInfo1, uExitInfo2);
4098 }
4099
4100 /*
4101 * Check the current TSS limit. The last written byte to the current TSS during the
4102 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4103 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4104 *
4105     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4106 * end up with smaller than "legal" TSS limits.
4107 */
4108 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4109 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4110 if (uCurTSSLimit < uCurTSSLimitMin)
4111 {
4112 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4113 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4114 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4115 }
4116
4117 /*
4118 * Verify that the new TSS can be accessed and map it. Map only the required contents
4119 * and not the entire TSS.
4120 */
4121 void *pvNewTSS;
4122 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4123 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4124 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4125 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4126 * not perform correct translation if this happens. See Intel spec. 7.2.1
4127 * "Task-State Segment" */
4128 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4132 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135
4136 /*
4137 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4138 */
4139 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4140 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4141 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4142 {
4143 PX86DESC pDescCurTSS;
4144 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4145 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4149 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152
4153 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4154 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4155 if (rcStrict != VINF_SUCCESS)
4156 {
4157 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4158 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4159 return rcStrict;
4160 }
4161
4162 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4163 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4164 {
4165 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4166 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4167 u32EFlags &= ~X86_EFL_NT;
4168 }
4169 }
4170
4171 /*
4172 * Save the CPU state into the current TSS.
4173 */
4174 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4175 if (GCPtrNewTSS == GCPtrCurTSS)
4176 {
4177 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4178 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4179 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4180 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4181 pVCpu->cpum.GstCtx.ldtr.Sel));
4182 }
4183 if (fIsNewTSS386)
4184 {
4185 /*
4186 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4187 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4188 */
4189 void *pvCurTSS32;
4190 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4191 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4192 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4193 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4194 if (rcStrict != VINF_SUCCESS)
4195 {
4196 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4197 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4198 return rcStrict;
4199 }
4200
4201        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
4202 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4203 pCurTSS32->eip = uNextEip;
4204 pCurTSS32->eflags = u32EFlags;
4205 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4206 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4207 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4208 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4209 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4210 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4211 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4212 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4213 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4214 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4215 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4216 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4217 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4218 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4219
4220 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4221 if (rcStrict != VINF_SUCCESS)
4222 {
4223 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4224 VBOXSTRICTRC_VAL(rcStrict)));
4225 return rcStrict;
4226 }
4227 }
4228 else
4229 {
4230 /*
4231 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4232 */
4233 void *pvCurTSS16;
4234 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4235 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4236 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4237 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4241 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
4246 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4247 pCurTSS16->ip = uNextEip;
4248 pCurTSS16->flags = u32EFlags;
4249 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4250 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4251 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4252 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4253 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4254 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4255 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4256 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4257 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4258 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4259 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4260 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4261
4262 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4263 if (rcStrict != VINF_SUCCESS)
4264 {
4265 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4266 VBOXSTRICTRC_VAL(rcStrict)));
4267 return rcStrict;
4268 }
4269 }
4270
4271 /*
4272 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4273 */
4274 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4275 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4276 {
4277        /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4278 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4279 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4280 }
4281
4282 /*
4283 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4284 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4285 */
4286 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4287 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4288 bool fNewDebugTrap;
4289 if (fIsNewTSS386)
4290 {
4291 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4292 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4293 uNewEip = pNewTSS32->eip;
4294 uNewEflags = pNewTSS32->eflags;
4295 uNewEax = pNewTSS32->eax;
4296 uNewEcx = pNewTSS32->ecx;
4297 uNewEdx = pNewTSS32->edx;
4298 uNewEbx = pNewTSS32->ebx;
4299 uNewEsp = pNewTSS32->esp;
4300 uNewEbp = pNewTSS32->ebp;
4301 uNewEsi = pNewTSS32->esi;
4302 uNewEdi = pNewTSS32->edi;
4303 uNewES = pNewTSS32->es;
4304 uNewCS = pNewTSS32->cs;
4305 uNewSS = pNewTSS32->ss;
4306 uNewDS = pNewTSS32->ds;
4307 uNewFS = pNewTSS32->fs;
4308 uNewGS = pNewTSS32->gs;
4309 uNewLdt = pNewTSS32->selLdt;
4310 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4311 }
4312 else
4313 {
4314 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4315 uNewCr3 = 0;
4316 uNewEip = pNewTSS16->ip;
4317 uNewEflags = pNewTSS16->flags;
4318 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4319 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4320 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4321 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4322 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4323 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4324 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4325 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4326 uNewES = pNewTSS16->es;
4327 uNewCS = pNewTSS16->cs;
4328 uNewSS = pNewTSS16->ss;
4329 uNewDS = pNewTSS16->ds;
4330 uNewFS = 0;
4331 uNewGS = 0;
4332 uNewLdt = pNewTSS16->selLdt;
4333 fNewDebugTrap = false;
4334 }
4335
4336 if (GCPtrNewTSS == GCPtrCurTSS)
4337 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4338 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4339
4340 /*
4341 * We're done accessing the new TSS.
4342 */
4343 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4347 return rcStrict;
4348 }
4349
4350 /*
4351 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4352 */
4353 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4354 {
4355 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4356 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4357 if (rcStrict != VINF_SUCCESS)
4358 {
4359 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4360 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4361 return rcStrict;
4362 }
4363
4364 /* Check that the descriptor indicates the new TSS is available (not busy). */
4365 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4366 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4367 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4368
4369 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4370 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4371 if (rcStrict != VINF_SUCCESS)
4372 {
4373 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4374 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4375 return rcStrict;
4376 }
4377 }
4378
4379 /*
4380 * From this point on, we're technically in the new task. We will defer exceptions
4381 * until the completion of the task switch but before executing any instructions in the new task.
4382 */
4383 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4384 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4385 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4386 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4387 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4388 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4389 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4390
4391 /* Set the busy bit in TR. */
4392 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4393 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4394 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4395 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4396 {
4397 uNewEflags |= X86_EFL_NT;
4398 }
4399
4400 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4401 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4402 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4403
4404 pVCpu->cpum.GstCtx.eip = uNewEip;
4405 pVCpu->cpum.GstCtx.eax = uNewEax;
4406 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4407 pVCpu->cpum.GstCtx.edx = uNewEdx;
4408 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4409 pVCpu->cpum.GstCtx.esp = uNewEsp;
4410 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4411 pVCpu->cpum.GstCtx.esi = uNewEsi;
4412 pVCpu->cpum.GstCtx.edi = uNewEdi;
4413
4414 uNewEflags &= X86_EFL_LIVE_MASK;
4415 uNewEflags |= X86_EFL_RA1_MASK;
4416 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4417
4418 /*
4419 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4420 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4421 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4422 */
4423 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4424 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4427 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4430 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4433 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4436 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4437
4438 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4439 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4441
4442 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4443 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4444 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4445 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4446
4447 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4448 {
4449 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4454 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4455 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4456 }
4457
4458 /*
4459 * Switch CR3 for the new task.
4460 */
4461 if ( fIsNewTSS386
4462 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4463 {
4464 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4465 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4466 AssertRCSuccessReturn(rc, rc);
4467
4468 /* Inform PGM. */
4469 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4470 AssertRCReturn(rc, rc);
4471 /* ignore informational status codes */
4472
4473 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4474 }
4475
4476 /*
4477 * Switch LDTR for the new task.
4478 */
4479 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4480 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4481 else
4482 {
4483 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4484
4485 IEMSELDESC DescNewLdt;
4486 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4487 if (rcStrict != VINF_SUCCESS)
4488 {
4489 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4490 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4491 return rcStrict;
4492 }
4493 if ( !DescNewLdt.Legacy.Gen.u1Present
4494 || DescNewLdt.Legacy.Gen.u1DescType
4495 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4496 {
4497 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4498 uNewLdt, DescNewLdt.Legacy.u));
4499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4500 }
4501
4502 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4503 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4504 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4505 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4506 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4507 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4508 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4510 }
4511
4512 IEMSELDESC DescSS;
4513 if (IEM_IS_V86_MODE(pVCpu))
4514 {
4515 pVCpu->iem.s.uCpl = 3;
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4520 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4521 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4522
4523 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4524 DescSS.Legacy.u = 0;
4525 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4526 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4527 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4528 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4529 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4530 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4531 DescSS.Legacy.Gen.u2Dpl = 3;
4532 }
4533 else
4534 {
4535 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4536
4537 /*
4538 * Load the stack segment for the new task.
4539 */
4540 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4541 {
4542 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* Fetch the descriptor. */
4547 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4548 if (rcStrict != VINF_SUCCESS)
4549 {
4550 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4551 VBOXSTRICTRC_VAL(rcStrict)));
4552 return rcStrict;
4553 }
4554
4555 /* SS must be a data segment and writable. */
4556 if ( !DescSS.Legacy.Gen.u1DescType
4557 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4558 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4559 {
4560 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4561 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4566 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4567 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4568 {
4569 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4570 uNewCpl));
4571 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 /* Is it there? */
4575 if (!DescSS.Legacy.Gen.u1Present)
4576 {
4577 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4578 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4579 }
4580
4581 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4582 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4583
4584 /* Set the accessed bit before committing the result into SS. */
4585 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4586 {
4587 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4588 if (rcStrict != VINF_SUCCESS)
4589 return rcStrict;
4590 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4591 }
4592
4593 /* Commit SS. */
4594 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4595 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4596 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4597 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4598 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4599 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4600 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4601
4602 /* CPL has changed, update IEM before loading rest of segments. */
4603 pVCpu->iem.s.uCpl = uNewCpl;
4604
4605 /*
4606 * Load the data segments for the new task.
4607 */
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4618 if (rcStrict != VINF_SUCCESS)
4619 return rcStrict;
4620
4621 /*
4622 * Load the code segment for the new task.
4623 */
4624 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4625 {
4626 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4627 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4628 }
4629
4630 /* Fetch the descriptor. */
4631 IEMSELDESC DescCS;
4632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4633 if (rcStrict != VINF_SUCCESS)
4634 {
4635 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4636 return rcStrict;
4637 }
4638
4639 /* CS must be a code segment. */
4640 if ( !DescCS.Legacy.Gen.u1DescType
4641 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4642 {
4643 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4644 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4646 }
4647
4648 /* For conforming CS, DPL must be less than or equal to the RPL. */
4649 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4650 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4651 {
4652 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4653 DescCS.Legacy.Gen.u2Dpl));
4654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4655 }
4656
4657 /* For non-conforming CS, DPL must match RPL. */
4658 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4659 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4660 {
4661 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4662 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4664 }
4665
4666 /* Is it there? */
4667 if (!DescCS.Legacy.Gen.u1Present)
4668 {
4669 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4670 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4671 }
4672
4673 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4674 u64Base = X86DESC_BASE(&DescCS.Legacy);
4675
4676 /* Set the accessed bit before committing the result into CS. */
4677 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4678 {
4679 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4680 if (rcStrict != VINF_SUCCESS)
4681 return rcStrict;
4682 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4683 }
4684
4685 /* Commit CS. */
4686 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4687 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4688 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4689 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4690 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4691 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4693 }
4694
4695 /** @todo Debug trap. */
4696 if (fIsNewTSS386 && fNewDebugTrap)
4697 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4698
4699 /*
4700 * Construct the error code masks based on what caused this task switch.
4701 * See Intel Instruction reference for INT.
4702 */
4703 uint16_t uExt;
4704 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4705 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4706 {
4707 uExt = 1;
4708 }
4709 else
4710 uExt = 0;
4711
4712 /*
4713 * Push any error code on to the new stack.
4714 */
4715 if (fFlags & IEM_XCPT_FLAGS_ERR)
4716 {
4717 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4718 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4719 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4720
4721 /* Check that there is sufficient space on the stack. */
4722 /** @todo Factor out segment limit checking for normal/expand down segments
4723 * into a separate function. */
4724 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4725 {
4726 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4727 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4728 {
4729 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4730 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4731 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4732 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4733 }
4734 }
4735 else
4736 {
4737 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4738 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4739 {
4740 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4741 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4742 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4743 }
4744 }
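 /* Note (illustrative): for an expand-down SS the valid offsets are those *above*
    the limit, up to 0xffff (D=0) or 0xffffffff (D=1). The check above therefore
    requires ESP-cbStackFrame .. ESP-1 to lie strictly above cbLimitSS without
    wrapping past the upper bound; e.g. with cbLimitSS=0x0fff and a 4 byte frame,
    ESP=0x1000 is rejected while ESP=0x1004 is accepted. */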
4745
4746
4747 if (fIsNewTSS386)
4748 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4749 else
4750 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4751 if (rcStrict != VINF_SUCCESS)
4752 {
4753 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4754 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4755 return rcStrict;
4756 }
4757 }
4758
4759 /* Check the new EIP against the new CS limit. */
4760 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4761 {
4762 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4763 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4764 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4765 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4766 }
4767
4768 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4769 pVCpu->cpum.GstCtx.ss.Sel));
4770 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4771}
4772
4773
4774/**
4775 * Implements exceptions and interrupts for protected mode.
4776 *
4777 * @returns VBox strict status code.
4778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4779 * @param cbInstr The number of bytes to offset rIP by in the return
4780 * address.
4781 * @param u8Vector The interrupt / exception vector number.
4782 * @param fFlags The flags.
4783 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4784 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4785 */
4786IEM_STATIC VBOXSTRICTRC
4787iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4788 uint8_t cbInstr,
4789 uint8_t u8Vector,
4790 uint32_t fFlags,
4791 uint16_t uErr,
4792 uint64_t uCr2)
4793{
4794 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4795
4796 /*
4797 * Read the IDT entry.
4798 */
4799 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4802 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
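 /* Illustration: protected-mode IDT entries are 8 bytes, so vector N occupies bytes
    8*N .. 8*N+7 of the IDT and IDTR.limit must be at least 8*N+7 (e.g. at least
    0x77 for vector 0x0e). */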
4804 X86DESC Idte;
4805 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4806 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4807 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4808 {
4809 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4810 return rcStrict;
4811 }
4812 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4813 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4814 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4815
4816 /*
4817 * Check the descriptor type, DPL and such.
4818 * ASSUMES this is done in the same order as described for call-gate calls.
4819 */
4820 if (Idte.Gate.u1DescType)
4821 {
4822 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4823 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4824 }
4825 bool fTaskGate = false;
4826 uint8_t f32BitGate = true;
4827 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4828 switch (Idte.Gate.u4Type)
4829 {
4830 case X86_SEL_TYPE_SYS_UNDEFINED:
4831 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4832 case X86_SEL_TYPE_SYS_LDT:
4833 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4834 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4835 case X86_SEL_TYPE_SYS_UNDEFINED2:
4836 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4837 case X86_SEL_TYPE_SYS_UNDEFINED3:
4838 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4839 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4840 case X86_SEL_TYPE_SYS_UNDEFINED4:
4841 {
4842 /** @todo check what actually happens when the type is wrong...
4843 * esp. call gates. */
4844 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4845 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4846 }
4847
4848 case X86_SEL_TYPE_SYS_286_INT_GATE:
4849 f32BitGate = false;
4850 RT_FALL_THRU();
4851 case X86_SEL_TYPE_SYS_386_INT_GATE:
4852 fEflToClear |= X86_EFL_IF;
4853 break;
4854
4855 case X86_SEL_TYPE_SYS_TASK_GATE:
4856 fTaskGate = true;
4857#ifndef IEM_IMPLEMENTS_TASKSWITCH
4858 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4859#endif
4860 break;
4861
4862 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4863 f32BitGate = false;
RT_FALL_THRU();
4864 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4865 break;
4866
4867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4868 }
4869
4870 /* Check DPL against CPL if applicable. */
4871 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4872 {
4873 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4876 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4877 }
4878 }
4879
4880 /* Is it there? */
4881 if (!Idte.Gate.u1Present)
4882 {
4883 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4884 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4885 }
4886
4887 /* Is it a task-gate? */
4888 if (fTaskGate)
4889 {
4890 /*
4891 * Construct the error code masks based on what caused this task switch.
4892 * See Intel Instruction reference for INT.
4893 */
4894 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4895 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4896 RTSEL SelTSS = Idte.Gate.u16Sel;
4897
4898 /*
4899 * Fetch the TSS descriptor in the GDT.
4900 */
4901 IEMSELDESC DescTSS;
4902 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4903 if (rcStrict != VINF_SUCCESS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4906 VBOXSTRICTRC_VAL(rcStrict)));
4907 return rcStrict;
4908 }
4909
4910 /* The TSS descriptor must be a system segment and be available (not busy). */
4911 if ( DescTSS.Legacy.Gen.u1DescType
4912 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4913 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4914 {
4915 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4916 u8Vector, SelTSS, DescTSS.Legacy.au64));
4917 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4918 }
4919
4920 /* The TSS must be present. */
4921 if (!DescTSS.Legacy.Gen.u1Present)
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4924 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4925 }
4926
4927 /* Do the actual task switch. */
4928 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4929 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4930 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4931 }
4932
4933 /* A null CS is bad. */
4934 RTSEL NewCS = Idte.Gate.u16Sel;
4935 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4936 {
4937 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4938 return iemRaiseGeneralProtectionFault0(pVCpu);
4939 }
4940
4941 /* Fetch the descriptor for the new CS. */
4942 IEMSELDESC DescCS;
4943 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4944 if (rcStrict != VINF_SUCCESS)
4945 {
4946 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4947 return rcStrict;
4948 }
4949
4950 /* Must be a code segment. */
4951 if (!DescCS.Legacy.Gen.u1DescType)
4952 {
4953 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4954 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4955 }
4956 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4957 {
4958 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4959 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4960 }
4961
4962 /* Don't allow lowering the privilege level. */
4963 /** @todo Does the lowering of privileges apply to software interrupts
4964 * only? This has bearings on the more-privileged or
4965 * same-privilege stack behavior further down. A testcase would
4966 * be nice. */
4967 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4968 {
4969 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4970 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4971 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4972 }
4973
4974 /* Make sure the selector is present. */
4975 if (!DescCS.Legacy.Gen.u1Present)
4976 {
4977 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4978 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4979 }
4980
4981 /* Check the new EIP against the new CS limit. */
4982 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4983 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4984 ? Idte.Gate.u16OffsetLow
4985 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
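 /* Note: 286 interrupt/trap gates carry only a 16-bit offset, while 386 gates split a
    32-bit offset across u16OffsetLow and u16OffsetHigh; hence the selection above. */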
4986 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4987 if (uNewEip > cbLimitCS)
4988 {
4989 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4990 u8Vector, uNewEip, cbLimitCS, NewCS));
4991 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4992 }
4993 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4994
4995 /* Calc the flag image to push. */
4996 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4997 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4998 fEfl &= ~X86_EFL_RF;
4999 else
5000 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5001
5002 /* From V8086 mode only go to CPL 0. */
5003 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5004 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
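 /* Note: a conforming handler CS keeps the current CPL, while a non-conforming one
    switches to CS.DPL; that result decides below whether a stack switch is needed. */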
5005 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5006 {
5007 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5008 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5009 }
5010
5011 /*
5012 * If the privilege level changes, we need to get a new stack from the TSS.
5013 * This in turns means validating the new SS and ESP...
5014 */
5015 if (uNewCpl != pVCpu->iem.s.uCpl)
5016 {
5017 RTSEL NewSS;
5018 uint32_t uNewEsp;
5019 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5020 if (rcStrict != VINF_SUCCESS)
5021 return rcStrict;
5022
5023 IEMSELDESC DescSS;
5024 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5025 if (rcStrict != VINF_SUCCESS)
5026 return rcStrict;
5027 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5028 if (!DescSS.Legacy.Gen.u1DefBig)
5029 {
5030 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5031 uNewEsp = (uint16_t)uNewEsp;
5032 }
5033
5034 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5035
5036 /* Check that there is sufficient space for the stack frame. */
5037 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5038 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5039 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5040 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
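 /* Illustration of the frame sizes used above: the inner-level frame holds SS, ESP,
    EFLAGS, CS and EIP (5 entries) plus an optional error code; when interrupting
    V8086 code, GS, FS, DS and ES are pushed as well (9/10 entries). The byte counts
    assume 2-byte entries for a 16-bit gate; << f32BitGate doubles them for 4-byte
    entries through a 32-bit gate. */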
5041
5042 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5043 {
5044 if ( uNewEsp - 1 > cbLimitSS
5045 || uNewEsp < cbStackFrame)
5046 {
5047 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5048 u8Vector, NewSS, uNewEsp, cbStackFrame));
5049 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5050 }
5051 }
5052 else
5053 {
5054 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5055 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5056 {
5057 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5058 u8Vector, NewSS, uNewEsp, cbStackFrame));
5059 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5060 }
5061 }
5062
5063 /*
5064 * Start making changes.
5065 */
5066
5067 /* Set the new CPL so that stack accesses use it. */
5068 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5069 pVCpu->iem.s.uCpl = uNewCpl;
5070
5071 /* Create the stack frame. */
5072 RTPTRUNION uStackFrame;
5073 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5074 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5075 if (rcStrict != VINF_SUCCESS)
5076 return rcStrict;
5077 void * const pvStackFrame = uStackFrame.pv;
5078 if (f32BitGate)
5079 {
5080 if (fFlags & IEM_XCPT_FLAGS_ERR)
5081 *uStackFrame.pu32++ = uErr;
5082 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5083 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5084 uStackFrame.pu32[2] = fEfl;
5085 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5086 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5087 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5088 if (fEfl & X86_EFL_VM)
5089 {
5090 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5091 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5092 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5093 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5094 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5095 }
5096 }
5097 else
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu16++ = uErr;
5101 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5102 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5103 uStackFrame.pu16[2] = fEfl;
5104 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5105 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5106 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5107 if (fEfl & X86_EFL_VM)
5108 {
5109 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5110 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5111 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5112 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5113 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5114 }
5115 }
5116 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5117 if (rcStrict != VINF_SUCCESS)
5118 return rcStrict;
5119
5120 /* Mark the selectors 'accessed' (hope this is the correct time). */
5121 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5122 * after pushing the stack frame? (Write protect the gdt + stack to
5123 * find out.) */
5124 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5125 {
5126 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5127 if (rcStrict != VINF_SUCCESS)
5128 return rcStrict;
5129 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5130 }
5131
5132 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5133 {
5134 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5135 if (rcStrict != VINF_SUCCESS)
5136 return rcStrict;
5137 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5138 }
5139
5140 /*
5141 * Start committing the register changes (joins with the DPL=CPL branch).
5142 */
5143 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5144 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5145 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5146 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5147 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5148 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5149 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5150 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5151 * SP is loaded).
5152 * Need to check the other combinations too:
5153 * - 16-bit TSS, 32-bit handler
5154 * - 32-bit TSS, 16-bit handler */
5155 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5156 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5157 else
5158 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5159
5160 if (fEfl & X86_EFL_VM)
5161 {
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5163 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5164 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5165 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5166 }
5167 }
5168 /*
5169 * Same privilege, no stack change and smaller stack frame.
5170 */
5171 else
5172 {
5173 uint64_t uNewRsp;
5174 RTPTRUNION uStackFrame;
5175 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
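 /* Illustration: with no stack switch only EFLAGS, CS and EIP (plus an optional
    error code) are pushed: 6 or 8 bytes through a 16-bit gate, 12 or 16 bytes
    through a 32-bit gate. */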
5176 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5177 if (rcStrict != VINF_SUCCESS)
5178 return rcStrict;
5179 void * const pvStackFrame = uStackFrame.pv;
5180
5181 if (f32BitGate)
5182 {
5183 if (fFlags & IEM_XCPT_FLAGS_ERR)
5184 *uStackFrame.pu32++ = uErr;
5185 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5186 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5187 uStackFrame.pu32[2] = fEfl;
5188 }
5189 else
5190 {
5191 if (fFlags & IEM_XCPT_FLAGS_ERR)
5192 *uStackFrame.pu16++ = uErr;
5193 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5194 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5195 uStackFrame.pu16[2] = fEfl;
5196 }
5197 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5198 if (rcStrict != VINF_SUCCESS)
5199 return rcStrict;
5200
5201 /* Mark the CS selector as 'accessed'. */
5202 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5203 {
5204 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5205 if (rcStrict != VINF_SUCCESS)
5206 return rcStrict;
5207 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5208 }
5209
5210 /*
5211 * Start committing the register changes (joins with the other branch).
5212 */
5213 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5214 }
5215
5216 /* ... register committing continues. */
5217 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5218 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5219 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5220 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5221 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5222 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5223
5224 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5225 fEfl &= ~fEflToClear;
5226 IEMMISC_SET_EFL(pVCpu, fEfl);
5227
5228 if (fFlags & IEM_XCPT_FLAGS_CR2)
5229 pVCpu->cpum.GstCtx.cr2 = uCr2;
5230
5231 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5232 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5233
5234 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5235}
5236
5237
5238/**
5239 * Implements exceptions and interrupts for long mode.
5240 *
5241 * @returns VBox strict status code.
5242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5243 * @param cbInstr The number of bytes to offset rIP by in the return
5244 * address.
5245 * @param u8Vector The interrupt / exception vector number.
5246 * @param fFlags The flags.
5247 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5248 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5249 */
5250IEM_STATIC VBOXSTRICTRC
5251iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5252 uint8_t cbInstr,
5253 uint8_t u8Vector,
5254 uint32_t fFlags,
5255 uint16_t uErr,
5256 uint64_t uCr2)
5257{
5258 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5259
5260 /*
5261 * Read the IDT entry.
5262 */
5263 uint16_t offIdt = (uint16_t)u8Vector << 4;
5264 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5267 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5268 }
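 /* Note: long-mode IDT entries are 16 bytes (hence offIdt = vector * 16), so the
    descriptor is fetched below as two 8-byte reads. */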
5269 X86DESC64 Idte;
5270 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5271 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5272 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5273 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5276 return rcStrict;
5277 }
5278 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5279 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5280 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5281
5282 /*
5283 * Check the descriptor type, DPL and such.
5284 * ASSUMES this is done in the same order as described for call-gate calls.
5285 */
5286 if (Idte.Gate.u1DescType)
5287 {
5288 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5289 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5290 }
5291 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5292 switch (Idte.Gate.u4Type)
5293 {
5294 case AMD64_SEL_TYPE_SYS_INT_GATE:
5295 fEflToClear |= X86_EFL_IF;
5296 break;
5297 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5298 break;
5299
5300 default:
5301 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5302 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5303 }
5304
5305 /* Check DPL against CPL if applicable. */
5306 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5307 {
5308 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5309 {
5310 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5311 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5312 }
5313 }
5314
5315 /* Is it there? */
5316 if (!Idte.Gate.u1Present)
5317 {
5318 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5319 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5320 }
5321
5322 /* A null CS is bad. */
5323 RTSEL NewCS = Idte.Gate.u16Sel;
5324 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5325 {
5326 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5327 return iemRaiseGeneralProtectionFault0(pVCpu);
5328 }
5329
5330 /* Fetch the descriptor for the new CS. */
5331 IEMSELDESC DescCS;
5332 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5333 if (rcStrict != VINF_SUCCESS)
5334 {
5335 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5336 return rcStrict;
5337 }
5338
5339 /* Must be a 64-bit code segment. */
5340 if (!DescCS.Long.Gen.u1DescType)
5341 {
5342 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5343 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5344 }
5345 if ( !DescCS.Long.Gen.u1Long
5346 || DescCS.Long.Gen.u1DefBig
5347 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5348 {
5349 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5350 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5351 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5352 }
5353
5354 /* Don't allow lowering the privilege level. For non-conforming CS
5355 selectors, the CS.DPL sets the privilege level the trap/interrupt
5356 handler runs at. For conforming CS selectors, the CPL remains
5357 unchanged, but the CS.DPL must be <= CPL. */
5358 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5359 * when CPU in Ring-0. Result \#GP? */
5360 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5361 {
5362 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5363 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5364 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5365 }
5366
5367
5368 /* Make sure the selector is present. */
5369 if (!DescCS.Legacy.Gen.u1Present)
5370 {
5371 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5372 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5373 }
5374
5375 /* Check that the new RIP is canonical. */
5376 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5377 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5378 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5379 if (!IEM_IS_CANONICAL(uNewRip))
5380 {
5381 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5382 return iemRaiseGeneralProtectionFault0(pVCpu);
5383 }
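 /* Note: with 48-bit linear addressing, canonical means bits 63:48 are a sign
    extension of bit 47; e.g. 0xffff800000000000 is canonical while
    0x0000800000000000 is not. */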
5384
5385 /*
5386 * If the privilege level changes or if the IST isn't zero, we need to get
5387 * a new stack from the TSS.
5388 */
5389 uint64_t uNewRsp;
5390 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5391 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5392 if ( uNewCpl != pVCpu->iem.s.uCpl
5393 || Idte.Gate.u3IST != 0)
5394 {
5395 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5396 if (rcStrict != VINF_SUCCESS)
5397 return rcStrict;
5398 }
5399 else
5400 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5401 uNewRsp &= ~(uint64_t)0xf;
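 /* Note: in 64-bit mode the CPU aligns the stack to a 16-byte boundary before
    pushing the interrupt frame, which is what the masking above reproduces. */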
5402
5403 /*
5404 * Calc the flag image to push.
5405 */
5406 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5407 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5408 fEfl &= ~X86_EFL_RF;
5409 else
5410 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5411
5412 /*
5413 * Start making changes.
5414 */
5415 /* Set the new CPL so that stack accesses use it. */
5416 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5417 pVCpu->iem.s.uCpl = uNewCpl;
5418
5419 /* Create the stack frame. */
5420 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
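 /* Illustration: the 64-bit frame is always 5 qwords (SS, RSP, RFLAGS, CS, RIP)
    plus an optional error code qword at the new RSP, i.e. 40 or 48 bytes. */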
5421 RTPTRUNION uStackFrame;
5422 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5423 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5424 if (rcStrict != VINF_SUCCESS)
5425 return rcStrict;
5426 void * const pvStackFrame = uStackFrame.pv;
5427
5428 if (fFlags & IEM_XCPT_FLAGS_ERR)
5429 *uStackFrame.pu64++ = uErr;
5430 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5431 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5432 uStackFrame.pu64[2] = fEfl;
5433 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5434 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5435 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5436 if (rcStrict != VINF_SUCCESS)
5437 return rcStrict;
5438
5439 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5440 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5441 * after pushing the stack frame? (Write protect the gdt + stack to
5442 * find out.) */
5443 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5444 {
5445 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5446 if (rcStrict != VINF_SUCCESS)
5447 return rcStrict;
5448 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5449 }
5450
5451 /*
5452 * Start committing the register changes.
5453 */
5454 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5455 * hidden registers when interrupting 32-bit or 16-bit code! */
5456 if (uNewCpl != uOldCpl)
5457 {
5458 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5459 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5461 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5462 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5463 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5464 }
5465 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5466 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5467 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5468 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5469 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5470 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5471 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5472 pVCpu->cpum.GstCtx.rip = uNewRip;
5473
5474 fEfl &= ~fEflToClear;
5475 IEMMISC_SET_EFL(pVCpu, fEfl);
5476
5477 if (fFlags & IEM_XCPT_FLAGS_CR2)
5478 pVCpu->cpum.GstCtx.cr2 = uCr2;
5479
5480 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5481 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5482
5483 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5484}
5485
5486
5487/**
5488 * Implements exceptions and interrupts.
5489 *
5490 * All exceptions and interrupts go through this function!
5491 *
5492 * @returns VBox strict status code.
5493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5494 * @param cbInstr The number of bytes to offset rIP by in the return
5495 * address.
5496 * @param u8Vector The interrupt / exception vector number.
5497 * @param fFlags The flags.
5498 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5499 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5500 */
5501DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5502iemRaiseXcptOrInt(PVMCPU pVCpu,
5503 uint8_t cbInstr,
5504 uint8_t u8Vector,
5505 uint32_t fFlags,
5506 uint16_t uErr,
5507 uint64_t uCr2)
5508{
5509 /*
5510 * Get all the state that we might need here.
5511 */
5512 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5513 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5514
5515#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5516 /*
5517 * Flush prefetch buffer
5518 */
5519 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5520#endif
5521
5522 /*
5523 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5524 */
5525 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5526 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5527 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5528 | IEM_XCPT_FLAGS_BP_INSTR
5529 | IEM_XCPT_FLAGS_ICEBP_INSTR
5530 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5531 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5532 {
5533 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5534 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5535 u8Vector = X86_XCPT_GP;
5536 uErr = 0;
5537 }
5538#ifdef DBGFTRACE_ENABLED
5539 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5540 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5541 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5542#endif
5543
5544 /*
5545 * Evaluate whether NMI blocking should be in effect.
5546 * Normally, NMI blocking is in effect whenever we inject an NMI.
5547 */
5548 bool fBlockNmi;
5549 if ( u8Vector == X86_XCPT_NMI
5550 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5551 fBlockNmi = true;
5552 else
5553 fBlockNmi = false;
5554
5555#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5556 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5557 {
5558 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5559 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5560 return rcStrict0;
5561
5562 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5563 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5564 {
5565 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5566 fBlockNmi = false;
5567 }
5568 }
5569#endif
5570
5571#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5572 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5573 {
5574 /*
5575 * If the event is being injected as part of VMRUN, it isn't subject to event
5576 * intercepts in the nested-guest. However, secondary exceptions that occur
5577 * during injection of any event -are- subject to exception intercepts.
5578 *
5579 * See AMD spec. 15.20 "Event Injection".
5580 */
5581 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5582 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5583 else
5584 {
5585 /*
5586 * Check and handle if the event being raised is intercepted.
5587 */
5588 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5589 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5590 return rcStrict0;
5591 }
5592 }
5593#endif
5594
5595 /*
5596 * Set NMI blocking if necessary.
5597 */
5598 if ( fBlockNmi
5599 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5600 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5601
5602 /*
5603 * Do recursion accounting.
5604 */
5605 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5606 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5607 if (pVCpu->iem.s.cXcptRecursions == 0)
5608 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5609 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5610 else
5611 {
5612 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5613 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5614 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5615
5616 if (pVCpu->iem.s.cXcptRecursions >= 4)
5617 {
5618#ifdef DEBUG_bird
5619 AssertFailed();
5620#endif
5621 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5622 }
5623
5624 /*
5625 * Evaluate the sequence of recurring events.
5626 */
5627 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5628 NULL /* pXcptRaiseInfo */);
5629 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5630 { /* likely */ }
5631 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5632 {
5633 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5634 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5635 u8Vector = X86_XCPT_DF;
5636 uErr = 0;
5637#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5638 /* VMX nested-guest #DF intercept needs to be checked here. */
5639 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5640 {
5641 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5642 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5643 return rcStrict0;
5644 }
5645#endif
5646 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5647 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5648 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5649 }
5650 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5651 {
5652 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5653 return iemInitiateCpuShutdown(pVCpu);
5654 }
5655 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5656 {
5657 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5658 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5659 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5660 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5661 return VERR_EM_GUEST_CPU_HANG;
5662 }
5663 else
5664 {
5665 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5666 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5667 return VERR_IEM_IPE_9;
5668 }
5669
5670 /*
5671 * The 'EXT' bit is set when an exception occurs during delivery of an external
5672 * event, such as an interrupt or an earlier exception[1]. The privileged software
5673 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5674 * INT n, INT3 and INTO instructions the 'EXT' bit is not set[3].
5675 *
5676 * [1] - Intel spec. 6.13 "Error Code"
5677 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5678 * [3] - Intel Instruction reference for INT n.
5679 */
5680 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5681 && (fFlags & IEM_XCPT_FLAGS_ERR)
5682 && u8Vector != X86_XCPT_PF
5683 && u8Vector != X86_XCPT_DF)
5684 {
5685 uErr |= X86_TRAP_ERR_EXTERNAL;
5686 }
5687 }
5688
5689 pVCpu->iem.s.cXcptRecursions++;
5690 pVCpu->iem.s.uCurXcpt = u8Vector;
5691 pVCpu->iem.s.fCurXcpt = fFlags;
5692 pVCpu->iem.s.uCurXcptErr = uErr;
5693 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5694
5695 /*
5696 * Extensive logging.
5697 */
5698#if defined(LOG_ENABLED) && defined(IN_RING3)
5699 if (LogIs3Enabled())
5700 {
5701 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5702 PVM pVM = pVCpu->CTX_SUFF(pVM);
5703 char szRegs[4096];
5704 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5705 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5706 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5707 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5708 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5709 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5710 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5711 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5712 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5713 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5714 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5715 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5716 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5717 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5718 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5719 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5720 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5721 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5722 " efer=%016VR{efer}\n"
5723 " pat=%016VR{pat}\n"
5724 " sf_mask=%016VR{sf_mask}\n"
5725 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5726 " lstar=%016VR{lstar}\n"
5727 " star=%016VR{star} cstar=%016VR{cstar}\n"
5728 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5729 );
5730
5731 char szInstr[256];
5732 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5733 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5734 szInstr, sizeof(szInstr), NULL);
5735 Log3(("%s%s\n", szRegs, szInstr));
5736 }
5737#endif /* LOG_ENABLED */
5738
5739 /*
5740 * Call the mode specific worker function.
5741 */
5742 VBOXSTRICTRC rcStrict;
5743 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5744 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5745 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5746 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5747 else
5748 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5749
5750 /* Flush the prefetch buffer. */
5751#ifdef IEM_WITH_CODE_TLB
5752 pVCpu->iem.s.pbInstrBuf = NULL;
5753#else
5754 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5755#endif
5756
5757 /*
5758 * Unwind.
5759 */
5760 pVCpu->iem.s.cXcptRecursions--;
5761 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5762 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5763 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5764 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5765 pVCpu->iem.s.cXcptRecursions + 1));
5766 return rcStrict;
5767}
5768
5769#ifdef IEM_WITH_SETJMP
5770/**
5771 * See iemRaiseXcptOrInt. Will not return.
5772 */
5773IEM_STATIC DECL_NO_RETURN(void)
5774iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5775 uint8_t cbInstr,
5776 uint8_t u8Vector,
5777 uint32_t fFlags,
5778 uint16_t uErr,
5779 uint64_t uCr2)
5780{
5781 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5782 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5783}
5784#endif
5785
5786
5787/** \#DE - 00. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5789{
5790 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5791}
5792
5793
5794/** \#DB - 01.
5795 * @note This automatically clears DR7.GD. */
5796DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5797{
5798 /** @todo set/clear RF. */
5799 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5800 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5801}
5802
5803
5804/** \#BR - 05. */
5805DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5806{
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5808}
5809
5810
5811/** \#UD - 06. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5815}
5816
5817
5818/** \#NM - 07. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5820{
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5822}
5823
5824
5825/** \#TS(err) - 0a. */
5826DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5827{
5828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5829}
5830
5831
5832/** \#TS(tr) - 0a. */
5833DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5834{
5835 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5836 pVCpu->cpum.GstCtx.tr.Sel, 0);
5837}
5838
5839
5840/** \#TS(0) - 0a. */
5841DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5842{
5843 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5844 0, 0);
5845}
5846
5847
5848 /** \#TS(sel) - 0a. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5850{
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5852 uSel & X86_SEL_MASK_OFF_RPL, 0);
5853}
5854
5855
5856/** \#NP(err) - 0b. */
5857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5858{
5859 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5860}
5861
5862
5863/** \#NP(sel) - 0b. */
5864DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5865{
5866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5867 uSel & ~X86_SEL_RPL, 0);
5868}
5869
5870
5871/** \#SS(seg) - 0c. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5873{
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5875 uSel & ~X86_SEL_RPL, 0);
5876}
5877
5878
5879/** \#SS(err) - 0c. */
5880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5881{
5882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5883}
5884
5885
5886/** \#GP(n) - 0d. */
5887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5888{
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5890}
5891
5892
5893/** \#GP(0) - 0d. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5895{
5896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5897}
5898
5899#ifdef IEM_WITH_SETJMP
5900/** \#GP(0) - 0d. */
5901DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5902{
5903 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5904}
5905#endif
5906
5907
5908/** \#GP(sel) - 0d. */
5909DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5910{
5911 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5912 Sel & ~X86_SEL_RPL, 0);
5913}
5914
5915
5916/** \#GP(0) - 0d. */
5917DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5918{
5919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5920}
5921
5922
5923/** \#GP(sel) - 0d. */
5924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5925{
5926 NOREF(iSegReg); NOREF(fAccess);
5927 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5928 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5929}
5930
5931#ifdef IEM_WITH_SETJMP
5932/** \#GP(sel) - 0d, longjmp. */
5933DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5934{
5935 NOREF(iSegReg); NOREF(fAccess);
5936 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5937 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5938}
5939#endif
5940
5941/** \#GP(sel) - 0d. */
5942DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5943{
5944 NOREF(Sel);
5945 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5946}
5947
5948#ifdef IEM_WITH_SETJMP
5949/** \#GP(sel) - 0d, longjmp. */
5950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5951{
5952 NOREF(Sel);
5953 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5954}
5955#endif
5956
5957
5958/** \#GP(sel) - 0d. */
5959DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5960{
5961 NOREF(iSegReg); NOREF(fAccess);
5962 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5963}
5964
5965#ifdef IEM_WITH_SETJMP
5966/** \#GP(sel) - 0d, longjmp. */
5967DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5968 uint32_t fAccess)
5969{
5970 NOREF(iSegReg); NOREF(fAccess);
5971 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5972}
5973#endif
5974
5975
5976/** \#PF(n) - 0e. */
5977DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5978{
5979 uint16_t uErr;
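 /* Reminder of the #PF error code layout assembled below: bit 0 = P (protection
    violation vs. not-present), bit 1 = W/R (write access), bit 2 = U/S (CPL 3
    access), bit 4 = I/D (instruction fetch); reserved-bit faults (bit 3) are not
    produced here (see the @todo below). */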
5980 switch (rc)
5981 {
5982 case VERR_PAGE_NOT_PRESENT:
5983 case VERR_PAGE_TABLE_NOT_PRESENT:
5984 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5985 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5986 uErr = 0;
5987 break;
5988
5989 default:
5990 AssertMsgFailed(("%Rrc\n", rc));
5991 RT_FALL_THRU();
5992 case VERR_ACCESS_DENIED:
5993 uErr = X86_TRAP_PF_P;
5994 break;
5995
5996 /** @todo reserved */
5997 }
5998
5999 if (pVCpu->iem.s.uCpl == 3)
6000 uErr |= X86_TRAP_PF_US;
6001
6002 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6003 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6004 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6005 uErr |= X86_TRAP_PF_ID;
6006
6007#if 0 /* This is so much nonsense, really. Why was it done like that? */
6008 /* Note! RW access callers reporting a WRITE protection fault, will clear
6009 the READ flag before calling. So, read-modify-write accesses (RW)
6010 can safely be reported as READ faults. */
6011 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6012 uErr |= X86_TRAP_PF_RW;
6013#else
6014 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6015 {
6016 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6017 uErr |= X86_TRAP_PF_RW;
6018 }
6019#endif
6020
6021 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6022 uErr, GCPtrWhere);
6023}
6024
6025#ifdef IEM_WITH_SETJMP
6026/** \#PF(n) - 0e, longjmp. */
6027IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6028{
6029 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6030}
6031#endif
6032
6033
6034/** \#MF(0) - 10. */
6035DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6036{
6037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6038}
6039
6040
6041/** \#AC(0) - 11. */
6042DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6043{
6044 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6045}
6046
6047
6048/**
6049 * Macro for calling iemCImplRaiseDivideError().
6050 *
6051 * This enables us to add/remove arguments and force different levels of
6052 * inlining as we wish.
6053 *
6054 * @return Strict VBox status code.
6055 */
6056#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6057IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6058{
6059 NOREF(cbInstr);
6060 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6061}
6062
6063
6064/**
6065 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6066 *
6067 * This enables us to add/remove arguments and force different levels of
6068 * inlining as we wish.
6069 *
6070 * @return Strict VBox status code.
6071 */
6072#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6073IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6074{
6075 NOREF(cbInstr);
6076 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6077}
6078
6079
6080/**
6081 * Macro for calling iemCImplRaiseInvalidOpcode().
6082 *
6083 * This enables us to add/remove arguments and force different levels of
6084 * inlining as we wish.
6085 *
6086 * @return Strict VBox status code.
6087 */
6088#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6089IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6090{
6091 NOREF(cbInstr);
6092 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6093}
6094
6095
6096/** @} */
6097
6098
6099/*
6100 *
6101 * Helper routines.
6102 * Helper routines.
6103 * Helper routines.
6104 *
6105 */
6106
6107/**
6108 * Recalculates the effective operand size.
6109 *
6110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6111 */
6112IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6113{
6114 switch (pVCpu->iem.s.enmCpuMode)
6115 {
6116 case IEMMODE_16BIT:
6117 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6118 break;
6119 case IEMMODE_32BIT:
6120 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6121 break;
6122 case IEMMODE_64BIT:
6123 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6124 {
6125 case 0:
6126 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6127 break;
6128 case IEM_OP_PRF_SIZE_OP:
6129 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6130 break;
6131 case IEM_OP_PRF_SIZE_REX_W:
6132 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6133 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6134 break;
6135 }
6136 break;
6137 default:
6138 AssertFailed();
6139 }
6140}
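/* Illustrative summary of the 64-bit case above: with no size prefixes the
   default operand size applies (32-bit for most instructions), a lone 0x66
   prefix selects 16-bit, and REX.W forces 64-bit, taking precedence over
   0x66. */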
6141
6142
6143/**
6144 * Sets the default operand size to 64-bit and recalculates the effective
6145 * operand size.
6146 *
6147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6148 */
6149IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6150{
6151 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6152 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6153 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6154 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6155 else
6156 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6157}
6158
6159
6160/*
6161 *
6162 * Common opcode decoders.
6163 * Common opcode decoders.
6164 * Common opcode decoders.
6165 *
6166 */
6167//#include <iprt/mem.h>
6168
6169/**
6170 * Used to add extra details about a stub case.
6171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6172 */
6173IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6174{
6175#if defined(LOG_ENABLED) && defined(IN_RING3)
6176 PVM pVM = pVCpu->CTX_SUFF(pVM);
6177 char szRegs[4096];
6178 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6179 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6180 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6181 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6182 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6183 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6184 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6185 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6186 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6187 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6188 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6189 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6190 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6191 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6192 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6193 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6194 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6195 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6196 " efer=%016VR{efer}\n"
6197 " pat=%016VR{pat}\n"
6198 " sf_mask=%016VR{sf_mask}\n"
6199 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6200 " lstar=%016VR{lstar}\n"
6201 " star=%016VR{star} cstar=%016VR{cstar}\n"
6202 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6203 );
6204
6205 char szInstr[256];
6206 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6207 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6208 szInstr, sizeof(szInstr), NULL);
6209
6210 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6211#else
6212 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6213#endif
6214}
6215
6216/**
6217 * Complains about a stub.
6218 *
6219 * Two versions of this macro are provided: one for daily use and one for use
6220 * when working on IEM.
6221 */
6222#if 0
6223# define IEMOP_BITCH_ABOUT_STUB() \
6224 do { \
6225 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6226 iemOpStubMsg2(pVCpu); \
6227 RTAssertPanic(); \
6228 } while (0)
6229#else
6230# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6231#endif
6232
6233/** Stubs an opcode. */
6234#define FNIEMOP_STUB(a_Name) \
6235 FNIEMOP_DEF(a_Name) \
6236 { \
6237 RT_NOREF_PV(pVCpu); \
6238 IEMOP_BITCH_ABOUT_STUB(); \
6239 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6240 } \
6241 typedef int ignore_semicolon
6242
6243/** Stubs an opcode. */
6244#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6245 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6246 { \
6247 RT_NOREF_PV(pVCpu); \
6248 RT_NOREF_PV(a_Name0); \
6249 IEMOP_BITCH_ABOUT_STUB(); \
6250 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6251 } \
6252 typedef int ignore_semicolon
6253
6254/** Stubs an opcode which currently should raise \#UD. */
6255#define FNIEMOP_UD_STUB(a_Name) \
6256 FNIEMOP_DEF(a_Name) \
6257 { \
6258 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6259 return IEMOP_RAISE_INVALID_OPCODE(); \
6260 } \
6261 typedef int ignore_semicolon
6262
6263/** Stubs an opcode which currently should raise \#UD. */
6264#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6265 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6266 { \
6267 RT_NOREF_PV(pVCpu); \
6268 RT_NOREF_PV(a_Name0); \
6269 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6270 return IEMOP_RAISE_INVALID_OPCODE(); \
6271 } \
6272 typedef int ignore_semicolon
6273
6274
6275
6276/** @name Register Access.
6277 * @{
6278 */
6279
6280/**
6281 * Gets a reference (pointer) to the specified hidden segment register.
6282 *
6283 * @returns Hidden register reference.
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 * @param iSegReg The segment register.
6286 */
6287IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6288{
6289 Assert(iSegReg < X86_SREG_COUNT);
6290 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6291 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6292
6293#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6294 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6295 { /* likely */ }
6296 else
6297 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6298#else
6299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6300#endif
6301 return pSReg;
6302}
6303
6304
6305/**
6306 * Ensures that the given hidden segment register is up to date.
6307 *
6308 * @returns Hidden register reference.
6309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6310 * @param pSReg The segment register.
6311 */
6312IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6313{
6314#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6315 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6316 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6317#else
6318 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6319 NOREF(pVCpu);
6320#endif
6321 return pSReg;
6322}
6323
6324
6325/**
6326 * Gets a reference (pointer) to the specified segment register (the selector
6327 * value).
6328 *
6329 * @returns Pointer to the selector variable.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iSegReg The segment register.
6332 */
6333DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6334{
6335 Assert(iSegReg < X86_SREG_COUNT);
6336 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6337 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6338}
6339
6340
6341/**
6342 * Fetches the selector value of a segment register.
6343 *
6344 * @returns The selector value.
6345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6346 * @param iSegReg The segment register.
6347 */
6348DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6349{
6350 Assert(iSegReg < X86_SREG_COUNT);
6351 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6352 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6353}
6354
6355
6356/**
6357 * Fetches the base address value of a segment register.
6358 *
6359 * @returns The segment base address value.
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param iSegReg The segment register.
6362 */
6363DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6364{
6365 Assert(iSegReg < X86_SREG_COUNT);
6366 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6367 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6368}
6369
6370
6371/**
6372 * Gets a reference (pointer) to the specified general purpose register.
6373 *
6374 * @returns Register reference.
6375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6376 * @param iReg The general purpose register.
6377 */
6378DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6379{
6380 Assert(iReg < 16);
6381 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6382}
6383
6384
6385/**
6386 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6387 *
6388 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6389 *
6390 * @returns Register reference.
6391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6392 * @param iReg The register.
6393 */
6394DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6395{
6396 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6397 {
6398 Assert(iReg < 16);
6399 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6400 }
6401 /* high 8-bit register. */
6402 Assert(iReg < 8);
6403 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6404}
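/* Example (illustrative): without a REX prefix, encodings 4-7 select the
   legacy high-byte registers, so iReg=4 yields &aGRegs[0].bHi (AH); with any
   REX prefix present the same encoding yields &aGRegs[4].u8 (SPL) instead. */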
6405
6406
6407/**
6408 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6409 *
6410 * @returns Register reference.
6411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6412 * @param iReg The register.
6413 */
6414DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6415{
6416 Assert(iReg < 16);
6417 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6418}
6419
6420
6421/**
6422 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6423 *
6424 * @returns Register reference.
6425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6426 * @param iReg The register.
6427 */
6428DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6429{
6430 Assert(iReg < 16);
6431 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6432}
6433
6434
6435/**
6436 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6437 *
6438 * @returns Register reference.
6439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6440 * @param iReg The register.
6441 */
6442DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6443{
6444 Assert(iReg < 16);
6445 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6446}
6447
6448
6449/**
6450 * Gets a reference (pointer) to the specified segment register's base address.
6451 *
6452 * @returns Segment register base address reference.
6453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6454 * @param iSegReg The segment register.
6455 */
6456DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6457{
6458 Assert(iSegReg < X86_SREG_COUNT);
6459 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6460 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6461}
6462
6463
6464/**
6465 * Fetches the value of an 8-bit general purpose register.
6466 *
6467 * @returns The register value.
6468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6469 * @param iReg The register.
6470 */
6471DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6472{
6473 return *iemGRegRefU8(pVCpu, iReg);
6474}
6475
6476
6477/**
6478 * Fetches the value of a 16-bit general purpose register.
6479 *
6480 * @returns The register value.
6481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6482 * @param iReg The register.
6483 */
6484DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6485{
6486 Assert(iReg < 16);
6487 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6488}
6489
6490
6491/**
6492 * Fetches the value of a 32-bit general purpose register.
6493 *
6494 * @returns The register value.
6495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6496 * @param iReg The register.
6497 */
6498DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6499{
6500 Assert(iReg < 16);
6501 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6502}
6503
6504
6505/**
6506 * Fetches the value of a 64-bit general purpose register.
6507 *
6508 * @returns The register value.
6509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6510 * @param iReg The register.
6511 */
6512DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6513{
6514 Assert(iReg < 16);
6515 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6516}
6517
6518
6519/**
6520 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6521 *
6522 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6523 * segment limit.
6524 *
6525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6526 * @param offNextInstr The offset of the next instruction.
6527 */
6528IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6529{
6530 switch (pVCpu->iem.s.enmEffOpSize)
6531 {
6532 case IEMMODE_16BIT:
6533 {
6534 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6535 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6536 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6537 return iemRaiseGeneralProtectionFault0(pVCpu);
6538 pVCpu->cpum.GstCtx.rip = uNewIp;
6539 break;
6540 }
6541
6542 case IEMMODE_32BIT:
6543 {
6544 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6545 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6546
6547 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6548 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6549 return iemRaiseGeneralProtectionFault0(pVCpu);
6550 pVCpu->cpum.GstCtx.rip = uNewEip;
6551 break;
6552 }
6553
6554 case IEMMODE_64BIT:
6555 {
6556 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6557
6558 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6559 if (!IEM_IS_CANONICAL(uNewRip))
6560 return iemRaiseGeneralProtectionFault0(pVCpu);
6561 pVCpu->cpum.GstCtx.rip = uNewRip;
6562 break;
6563 }
6564
6565 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6566 }
6567
6568 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6569
6570#ifndef IEM_WITH_CODE_TLB
6571 /* Flush the prefetch buffer. */
6572 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6573#endif
6574
6575 return VINF_SUCCESS;
6576}
6577
6578
6579/**
6580 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6581 *
6582 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6583 * segment limit.
6584 *
6585 * @returns Strict VBox status code.
6586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6587 * @param offNextInstr The offset of the next instruction.
6588 */
6589IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6590{
6591 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6592
6593 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6594 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6595 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6596 return iemRaiseGeneralProtectionFault0(pVCpu);
6597 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6598 pVCpu->cpum.GstCtx.rip = uNewIp;
6599 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6600
6601#ifndef IEM_WITH_CODE_TLB
6602 /* Flush the prefetch buffer. */
6603 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6604#endif
6605
6606 return VINF_SUCCESS;
6607}
6608
6609
6610/**
6611 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6612 *
6613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6614 * segment limit.
6615 *
6616 * @returns Strict VBox status code.
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param offNextInstr The offset of the next instruction.
6619 */
6620IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6621{
6622 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6623
6624 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6625 {
6626 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6627
6628 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6629 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6630 return iemRaiseGeneralProtectionFault0(pVCpu);
6631 pVCpu->cpum.GstCtx.rip = uNewEip;
6632 }
6633 else
6634 {
6635 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6636
6637 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6638 if (!IEM_IS_CANONICAL(uNewRip))
6639 return iemRaiseGeneralProtectionFault0(pVCpu);
6640 pVCpu->cpum.GstCtx.rip = uNewRip;
6641 }
6642 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6643
6644#ifndef IEM_WITH_CODE_TLB
6645 /* Flush the prefetch buffer. */
6646 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6647#endif
6648
6649 return VINF_SUCCESS;
6650}
6651
6652
6653/**
6654 * Performs a near jump to the specified address.
6655 *
6656 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6657 * segment limit.
6658 *
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param uNewRip The new RIP value.
6661 */
6662IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6663{
6664 switch (pVCpu->iem.s.enmEffOpSize)
6665 {
6666 case IEMMODE_16BIT:
6667 {
6668 Assert(uNewRip <= UINT16_MAX);
6669 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6670 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6671 return iemRaiseGeneralProtectionFault0(pVCpu);
6672 /** @todo Test 16-bit jump in 64-bit mode. */
6673 pVCpu->cpum.GstCtx.rip = uNewRip;
6674 break;
6675 }
6676
6677 case IEMMODE_32BIT:
6678 {
6679 Assert(uNewRip <= UINT32_MAX);
6680 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6681 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6682
6683 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6684 return iemRaiseGeneralProtectionFault0(pVCpu);
6685 pVCpu->cpum.GstCtx.rip = uNewRip;
6686 break;
6687 }
6688
6689 case IEMMODE_64BIT:
6690 {
6691 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6692
6693 if (!IEM_IS_CANONICAL(uNewRip))
6694 return iemRaiseGeneralProtectionFault0(pVCpu);
6695 pVCpu->cpum.GstCtx.rip = uNewRip;
6696 break;
6697 }
6698
6699 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6700 }
6701
6702 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6703
6704#ifndef IEM_WITH_CODE_TLB
6705 /* Flush the prefetch buffer. */
6706 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6707#endif
6708
6709 return VINF_SUCCESS;
6710}
6711
6712
6713/**
6714 * Gets the address of the top of the stack.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 */
6718DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6719{
6720 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6721 return pVCpu->cpum.GstCtx.rsp;
6722 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6723 return pVCpu->cpum.GstCtx.esp;
6724 return pVCpu->cpum.GstCtx.sp;
6725}
6726
6727
6728/**
6729 * Updates the RIP/EIP/IP to point to the next instruction.
6730 *
6731 * This function leaves the EFLAGS.RF flag alone.
6732 *
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 * @param cbInstr The number of bytes to add.
6735 */
6736IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6737{
6738 switch (pVCpu->iem.s.enmCpuMode)
6739 {
6740 case IEMMODE_16BIT:
6741 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6742 pVCpu->cpum.GstCtx.eip += cbInstr;
6743 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6744 break;
6745
6746 case IEMMODE_32BIT:
6747 pVCpu->cpum.GstCtx.eip += cbInstr;
6748 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6749 break;
6750
6751 case IEMMODE_64BIT:
6752 pVCpu->cpum.GstCtx.rip += cbInstr;
6753 break;
6754 default: AssertFailed();
6755 }
6756}
6757
6758
6759#if 0
6760/**
6761 * Updates the RIP/EIP/IP to point to the next instruction.
6762 *
6763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6764 */
6765IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6766{
6767 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6768}
6769#endif
6770
6771
6772
6773/**
6774 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6775 *
6776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6777 * @param cbInstr The number of bytes to add.
6778 */
6779IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6780{
6781 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6782
6783 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6784#if ARCH_BITS >= 64
6785 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6786 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6787 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6788#else
6789 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6790 pVCpu->cpum.GstCtx.rip += cbInstr;
6791 else
6792 pVCpu->cpum.GstCtx.eip += cbInstr;
6793#endif
6794}
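/* Rough sketch of the table trick above: the mask is 0xffffffff for 16-bit
   and 32-bit code and UINT64_MAX for 64-bit code, so e.g. in IEMMODE_32BIT
   the update is rip = (rip + cbInstr) & 0xffffffff, avoiding a per-mode
   branch on 64-bit hosts. */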
6795
6796
6797/**
6798 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6799 *
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 */
6802IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6803{
6804 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6805}
6806
6807
6808/**
6809 * Adds to the stack pointer.
6810 *
6811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6812 * @param cbToAdd The number of bytes to add (8-bit!).
6813 */
6814DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6815{
6816 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6817 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6818 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6819 pVCpu->cpum.GstCtx.esp += cbToAdd;
6820 else
6821 pVCpu->cpum.GstCtx.sp += cbToAdd;
6822}
6823
6824
6825/**
6826 * Subtracts from the stack pointer.
6827 *
6828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6829 * @param cbToSub The number of bytes to subtract (8-bit!).
6830 */
6831DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6832{
6833 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6834 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6835 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6836 pVCpu->cpum.GstCtx.esp -= cbToSub;
6837 else
6838 pVCpu->cpum.GstCtx.sp -= cbToSub;
6839}
6840
6841
6842/**
6843 * Adds to the temporary stack pointer.
6844 *
6845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6846 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6847 * @param cbToAdd The number of bytes to add (16-bit).
6848 */
6849DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6850{
6851 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6852 pTmpRsp->u += cbToAdd;
6853 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6854 pTmpRsp->DWords.dw0 += cbToAdd;
6855 else
6856 pTmpRsp->Words.w0 += cbToAdd;
6857}
6858
6859
6860/**
6861 * Subtracts from the temporary stack pointer.
6862 *
6863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6864 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6865 * @param cbToSub The number of bytes to subtract.
6866 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6867 * expecting that.
6868 */
6869DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6870{
6871 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6872 pTmpRsp->u -= cbToSub;
6873 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6874 pTmpRsp->DWords.dw0 -= cbToSub;
6875 else
6876 pTmpRsp->Words.w0 -= cbToSub;
6877}
6878
6879
6880/**
6881 * Calculates the effective stack address for a push of the specified size as
6882 * well as the new RSP value (upper bits may be masked).
6883 *
6884 * @returns Effective stack address for the push.
6885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6886 * @param cbItem The size of the stack item to push.
6887 * @param puNewRsp Where to return the new RSP value.
6888 */
6889DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6890{
6891 RTUINT64U uTmpRsp;
6892 RTGCPTR GCPtrTop;
6893 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6894
6895 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6896 GCPtrTop = uTmpRsp.u -= cbItem;
6897 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6898 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6899 else
6900 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6901 *puNewRsp = uTmpRsp.u;
6902 return GCPtrTop;
6903}
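/* Worked example (illustrative): with a 32-bit stack (SS.D set), ESP=0x1000
   and cbItem=4, this returns 0xffc as the effective push address and stores
   the decremented value (0xffc) via puNewRsp; a 16-bit stack would only
   update SP, a 64-bit one the full RSP. */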
6904
6905
6906/**
6907 * Gets the current stack pointer and calculates the value after a pop of the
6908 * specified size.
6909 *
6910 * @returns Current stack pointer.
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 * @param cbItem The size of the stack item to pop.
6913 * @param puNewRsp Where to return the new RSP value.
6914 */
6915DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6916{
6917 RTUINT64U uTmpRsp;
6918 RTGCPTR GCPtrTop;
6919 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6920
6921 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6922 {
6923 GCPtrTop = uTmpRsp.u;
6924 uTmpRsp.u += cbItem;
6925 }
6926 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6927 {
6928 GCPtrTop = uTmpRsp.DWords.dw0;
6929 uTmpRsp.DWords.dw0 += cbItem;
6930 }
6931 else
6932 {
6933 GCPtrTop = uTmpRsp.Words.w0;
6934 uTmpRsp.Words.w0 += cbItem;
6935 }
6936 *puNewRsp = uTmpRsp.u;
6937 return GCPtrTop;
6938}
6939
6940
6941/**
6942 * Calculates the effective stack address for a push of the specified size as
6943 * well as the new temporary RSP value (upper bits may be masked).
6944 *
6945 * @returns Effective stack address for the push.
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 * @param pTmpRsp The temporary stack pointer. This is updated.
6948 * @param cbItem The size of the stack item to push.
6949 */
6950DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6951{
6952 RTGCPTR GCPtrTop;
6953
6954 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6955 GCPtrTop = pTmpRsp->u -= cbItem;
6956 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6957 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6958 else
6959 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6960 return GCPtrTop;
6961}
6962
6963
6964/**
6965 * Gets the effective stack address for a pop of the specified size and
6966 * calculates and updates the temporary RSP.
6967 *
6968 * @returns Current stack pointer.
6969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6970 * @param pTmpRsp The temporary stack pointer. This is updated.
6971 * @param cbItem The size of the stack item to pop.
6972 */
6973DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6974{
6975 RTGCPTR GCPtrTop;
6976 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6977 {
6978 GCPtrTop = pTmpRsp->u;
6979 pTmpRsp->u += cbItem;
6980 }
6981 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6982 {
6983 GCPtrTop = pTmpRsp->DWords.dw0;
6984 pTmpRsp->DWords.dw0 += cbItem;
6985 }
6986 else
6987 {
6988 GCPtrTop = pTmpRsp->Words.w0;
6989 pTmpRsp->Words.w0 += cbItem;
6990 }
6991 return GCPtrTop;
6992}
6993
6994/** @} */
6995
6996
6997/** @name FPU access and helpers.
6998 *
6999 * @{
7000 */
7001
7002
7003/**
7004 * Hook for preparing to use the host FPU.
7005 *
7006 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7007 *
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 */
7010DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7011{
7012#ifdef IN_RING3
7013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7014#else
7015 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7016#endif
7017 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7018}
7019
7020
7021/**
7022 * Hook for preparing to use the host FPU for SSE.
7023 *
7024 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7025 *
7026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7027 */
7028DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7029{
7030 iemFpuPrepareUsage(pVCpu);
7031}
7032
7033
7034/**
7035 * Hook for preparing to use the host FPU for AVX.
7036 *
7037 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7038 *
7039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7040 */
7041DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7042{
7043 iemFpuPrepareUsage(pVCpu);
7044}
7045
7046
7047/**
7048 * Hook for actualizing the guest FPU state before the interpreter reads it.
7049 *
7050 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7051 *
7052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7053 */
7054DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7055{
7056#ifdef IN_RING3
7057 NOREF(pVCpu);
7058#else
7059 CPUMRZFpuStateActualizeForRead(pVCpu);
7060#endif
7061 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7062}
7063
7064
7065/**
7066 * Hook for actualizing the guest FPU state before the interpreter changes it.
7067 *
7068 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7069 *
7070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7071 */
7072DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7073{
7074#ifdef IN_RING3
7075 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7076#else
7077 CPUMRZFpuStateActualizeForChange(pVCpu);
7078#endif
7079 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7080}
7081
7082
7083/**
7084 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7085 * only.
7086 *
7087 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7088 *
7089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7090 */
7091DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7092{
7093#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7094 NOREF(pVCpu);
7095#else
7096 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7097#endif
7098 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7099}
7100
7101
7102/**
7103 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7104 * read+write.
7105 *
7106 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7107 *
7108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7109 */
7110DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7111{
7112#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7113 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7114#else
7115 CPUMRZFpuStateActualizeForChange(pVCpu);
7116#endif
7117 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7118}
7119
7120
7121/**
7122 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7123 * only.
7124 *
7125 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7126 *
7127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7128 */
7129DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7130{
7131#ifdef IN_RING3
7132 NOREF(pVCpu);
7133#else
7134 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7135#endif
7136 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7137}
7138
7139
7140/**
7141 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7142 * read+write.
7143 *
7144 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7145 *
7146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7147 */
7148DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7149{
7150#ifdef IN_RING3
7151 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7152#else
7153 CPUMRZFpuStateActualizeForChange(pVCpu);
7154#endif
7155 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7156}
7157
7158
7159/**
7160 * Stores a QNaN value into a FPU register.
7161 *
7162 * @param pReg Pointer to the register.
7163 */
7164DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7165{
7166 pReg->au32[0] = UINT32_C(0x00000000);
7167 pReg->au32[1] = UINT32_C(0xc0000000);
7168 pReg->au16[4] = UINT16_C(0xffff);
7169}
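/* Note (illustrative): the pattern written above is the 80-bit "real
   indefinite" QNaN: sign+exponent word 0xffff and significand
   0xc000000000000000. */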
7170
7171
7172/**
7173 * Updates the FOP, FPU.CS and FPUIP registers.
7174 *
7175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7176 * @param pFpuCtx The FPU context.
7177 */
7178DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7179{
7180 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7181 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7182 /** @todo x87.CS and FPUIP need to be kept separately. */
7183 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7184 {
7185 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7186 * happens in real mode here based on the fnsave and fnstenv images. */
7187 pFpuCtx->CS = 0;
7188 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7189 }
7190 else
7191 {
7192 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7193 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7194 }
7195}
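/* Example (illustrative): in real mode with CS=0x1234 and EIP=0x0010 the code
   above stores FPUIP = 0x0010 | (0x1234 << 4) = 0x12350, i.e. the layout the
   todo above assumes for fnsave/fnstenv images. */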
7196
7197
7198/**
7199 * Updates the x87.DS and FPUDP registers.
7200 *
7201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7202 * @param pFpuCtx The FPU context.
7203 * @param iEffSeg The effective segment register.
7204 * @param GCPtrEff The effective address relative to @a iEffSeg.
7205 */
7206DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7207{
7208 RTSEL sel;
7209 switch (iEffSeg)
7210 {
7211 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7212 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7213 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7214 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7215 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7216 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7217 default:
7218 AssertMsgFailed(("%d\n", iEffSeg));
7219 sel = pVCpu->cpum.GstCtx.ds.Sel;
7220 }
7221 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7222 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7223 {
7224 pFpuCtx->DS = 0;
7225 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7226 }
7227 else
7228 {
7229 pFpuCtx->DS = sel;
7230 pFpuCtx->FPUDP = GCPtrEff;
7231 }
7232}
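/* Example (illustrative): in real mode with DS=0x2000 and GCPtrEff=0x0100 the
   code above stores FPUDP = 0x0100 + (0x2000 << 4) = 0x20100. */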
7233
7234
7235/**
7236 * Rotates the stack registers in the push direction.
7237 *
7238 * @param pFpuCtx The FPU context.
7239 * @remarks This is a complete waste of time, but fxsave stores the registers in
7240 * stack order.
7241 */
7242DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7243{
7244 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7245 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7246 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7247 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7248 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7249 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7250 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7251 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7252 pFpuCtx->aRegs[0].r80 = r80Tmp;
7253}
7254
7255
7256/**
7257 * Rotates the stack registers in the pop direction.
7258 *
7259 * @param pFpuCtx The FPU context.
7260 * @remarks This is a complete waste of time, but fxsave stores the registers in
7261 * stack order.
7262 */
7263DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7264{
7265 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7266 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7267 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7268 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7269 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7270 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7271 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7272 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7273 pFpuCtx->aRegs[7].r80 = r80Tmp;
7274}
7275
7276
7277/**
7278 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7279 * exception prevents it.
7280 *
7281 * @param pResult The FPU operation result to push.
7282 * @param pFpuCtx The FPU context.
7283 */
7284IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7285{
7286 /* Update FSW and bail if there are pending exceptions afterwards. */
7287 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7288 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7289 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7290 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7291 {
7292 pFpuCtx->FSW = fFsw;
7293 return;
7294 }
7295
7296 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7297 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7298 {
7299 /* All is fine, push the actual value. */
7300 pFpuCtx->FTW |= RT_BIT(iNewTop);
7301 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7302 }
7303 else if (pFpuCtx->FCW & X86_FCW_IM)
7304 {
7305 /* Masked stack overflow, push QNaN. */
7306 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7307 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7308 }
7309 else
7310 {
7311 /* Raise stack overflow, don't push anything. */
7312 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7313 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7314 return;
7315 }
7316
7317 fFsw &= ~X86_FSW_TOP_MASK;
7318 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7319 pFpuCtx->FSW = fFsw;
7320
7321 iemFpuRotateStackPush(pFpuCtx);
7322}
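/* Note on the TOP arithmetic above (illustrative): (TOP + 7) & X86_FSW_TOP_SMASK
   is TOP - 1 modulo 8, i.e. a push; e.g. TOP=0 gives a new TOP of 7. */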
7323
7324
7325/**
7326 * Stores a result in a FPU register and updates the FSW and FTW.
7327 *
7328 * @param pFpuCtx The FPU context.
7329 * @param pResult The result to store.
7330 * @param iStReg Which FPU register to store it in.
7331 */
7332IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7333{
7334 Assert(iStReg < 8);
7335 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7336 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7337 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7338 pFpuCtx->FTW |= RT_BIT(iReg);
7339 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7340}
7341
7342
7343/**
7344 * Only updates the FPU status word (FSW) with the result of the current
7345 * instruction.
7346 *
7347 * @param pFpuCtx The FPU context.
7348 * @param u16FSW The FSW output of the current instruction.
7349 */
7350IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7351{
7352 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7353 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7354}
7355
7356
7357/**
7358 * Pops one item off the FPU stack if no pending exception prevents it.
7359 *
7360 * @param pFpuCtx The FPU context.
7361 */
7362IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7363{
7364 /* Check pending exceptions. */
7365 uint16_t uFSW = pFpuCtx->FSW;
7366 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7367 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7368 return;
7369
7370 /* TOP--. */
7371 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7372 uFSW &= ~X86_FSW_TOP_MASK;
7373 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7374 pFpuCtx->FSW = uFSW;
7375
7376 /* Mark the previous ST0 as empty. */
7377 iOldTop >>= X86_FSW_TOP_SHIFT;
7378 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7379
7380 /* Rotate the registers. */
7381 iemFpuRotateStackPop(pFpuCtx);
7382}
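/* Note on the TOP arithmetic above (illustrative): adding 9 << X86_FSW_TOP_SHIFT
   and masking is TOP + 1 modulo 8, i.e. a pop; e.g. TOP=7 wraps back to 0. */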
7383
7384
7385/**
7386 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7387 *
7388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7389 * @param pResult The FPU operation result to push.
7390 */
7391IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7392{
7393 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7394 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7395 iemFpuMaybePushResult(pResult, pFpuCtx);
7396}
7397
7398
7399/**
7400 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7401 * and sets FPUDP and FPUDS.
7402 *
7403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7404 * @param pResult The FPU operation result to push.
7405 * @param iEffSeg The effective segment register.
7406 * @param GCPtrEff The effective address relative to @a iEffSeg.
7407 */
7408IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7409{
7410 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7411 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7412 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7413 iemFpuMaybePushResult(pResult, pFpuCtx);
7414}
7415
7416
7417/**
7418 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7419 * unless a pending exception prevents it.
7420 *
7421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7422 * @param pResult The FPU operation result to store and push.
7423 */
7424IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7425{
7426 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7427 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7428
7429 /* Update FSW and bail if there are pending exceptions afterwards. */
7430 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7431 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7432 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7433 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7434 {
7435 pFpuCtx->FSW = fFsw;
7436 return;
7437 }
7438
7439 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7440 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7441 {
7442 /* All is fine, push the actual value. */
7443 pFpuCtx->FTW |= RT_BIT(iNewTop);
7444 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7445 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7446 }
7447 else if (pFpuCtx->FCW & X86_FCW_IM)
7448 {
7449 /* Masked stack overflow, push QNaN. */
7450 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7451 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7452 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7453 }
7454 else
7455 {
7456 /* Raise stack overflow, don't push anything. */
7457 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7458 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7459 return;
7460 }
7461
7462 fFsw &= ~X86_FSW_TOP_MASK;
7463 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7464 pFpuCtx->FSW = fFsw;
7465
7466 iemFpuRotateStackPush(pFpuCtx);
7467}
7468
7469
7470/**
7471 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7472 * FOP.
7473 *
7474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7475 * @param pResult The result to store.
7476 * @param iStReg Which FPU register to store it in.
7477 */
7478IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7479{
7480 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7481 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7482 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7483}
7484
7485
7486/**
7487 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7488 * FOP, and then pops the stack.
7489 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param pResult The result to store.
7492 * @param iStReg Which FPU register to store it in.
7493 */
7494IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7495{
7496 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7497 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7498 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7499 iemFpuMaybePopOne(pFpuCtx);
7500}
7501
7502
7503/**
7504 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7505 * FPUDP, and FPUDS.
7506 *
7507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7508 * @param pResult The result to store.
7509 * @param iStReg Which FPU register to store it in.
7510 * @param iEffSeg The effective memory operand selector register.
7511 * @param GCPtrEff The effective memory operand offset.
7512 */
7513IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7514 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7515{
7516 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7517 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7518 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7519 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7520}
7521
7522
7523/**
7524 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7525 * FPUDP, and FPUDS, and then pops the stack.
7526 *
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 * @param pResult The result to store.
7529 * @param iStReg Which FPU register to store it in.
7530 * @param iEffSeg The effective memory operand selector register.
7531 * @param GCPtrEff The effective memory operand offset.
7532 */
7533IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7534 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7535{
7536 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7537 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7539 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544/**
7545 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7546 *
7547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7548 */
7549IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7550{
7551 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7552 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7553}
7554
7555
7556/**
7557 * Marks the specified stack register as free (for FFREE).
7558 *
7559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7560 * @param iStReg The register to free.
7561 */
7562IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7563{
7564 Assert(iStReg < 8);
7565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7566 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7567 pFpuCtx->FTW &= ~RT_BIT(iReg);
7568}
7569
7570
7571/**
7572 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7573 *
7574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7575 */
7576IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7577{
7578 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7579 uint16_t uFsw = pFpuCtx->FSW;
7580 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7581 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7582 uFsw &= ~X86_FSW_TOP_MASK;
7583 uFsw |= uTop;
7584 pFpuCtx->FSW = uFsw;
7585}
7586
7587
7588/**
7589 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7590 *
7591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7592 */
7593IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7594{
7595 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7596 uint16_t uFsw = pFpuCtx->FSW;
7597 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7598 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7599 uFsw &= ~X86_FSW_TOP_MASK;
7600 uFsw |= uTop;
7601 pFpuCtx->FSW = uFsw;
7602}
7603
7604
7605/**
7606 * Updates the FSW, FOP, FPUIP, and FPUCS.
7607 *
7608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7609 * @param u16FSW The FSW from the current instruction.
7610 */
7611IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7612{
7613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7614 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7615 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7616}
7617
7618
7619/**
7620 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7621 *
7622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7623 * @param u16FSW The FSW from the current instruction.
7624 */
7625IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7626{
7627 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7628 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7629 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7630 iemFpuMaybePopOne(pFpuCtx);
7631}
7632
7633
7634/**
7635 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7636 *
7637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7638 * @param u16FSW The FSW from the current instruction.
7639 * @param iEffSeg The effective memory operand selector register.
7640 * @param GCPtrEff The effective memory operand offset.
7641 */
7642IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7643{
7644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7645 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7646 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7647 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7648}
7649
7650
7651/**
7652 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7653 *
7654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7655 * @param u16FSW The FSW from the current instruction.
7656 */
7657IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7658{
7659 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7660 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7661 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7662 iemFpuMaybePopOne(pFpuCtx);
7663 iemFpuMaybePopOne(pFpuCtx);
7664}
7665
7666
7667/**
7668 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7669 *
7670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7671 * @param u16FSW The FSW from the current instruction.
7672 * @param iEffSeg The effective memory operand selector register.
7673 * @param GCPtrEff The effective memory operand offset.
7674 */
7675IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7676{
7677 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7678 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7679 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7680 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7681 iemFpuMaybePopOne(pFpuCtx);
7682}
7683
7684
7685/**
7686 * Worker routine for raising an FPU stack underflow exception.
7687 *
7688 * @param pFpuCtx The FPU context.
7689 * @param iStReg The stack register being accessed.
7690 */
7691IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7692{
7693 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7694 if (pFpuCtx->FCW & X86_FCW_IM)
7695 {
7696 /* Masked underflow. */
7697 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7699 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7700 if (iStReg != UINT8_MAX)
7701 {
7702 pFpuCtx->FTW |= RT_BIT(iReg);
7703 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7704 }
7705 }
7706 else
7707 {
7708 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7709 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7710 }
7711}
7712
7713
7714/**
7715 * Raises a FPU stack underflow exception.
7716 *
7717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7718 * @param iStReg The destination register that should be loaded
7719 * with QNaN if \#IS is not masked. Specify
7720 * UINT8_MAX if none (like for fcom).
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7723{
7724 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7726 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7727}
7728
7729
7730DECL_NO_INLINE(IEM_STATIC, void)
7731iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7732{
7733 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7734 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7735 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7736 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7737}
7738
7739
7740DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7741{
7742 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7743 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7744 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7745 iemFpuMaybePopOne(pFpuCtx);
7746}
7747
7748
7749DECL_NO_INLINE(IEM_STATIC, void)
7750iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7751{
7752 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7753 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7754 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7755 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7756 iemFpuMaybePopOne(pFpuCtx);
7757}
7758
7759
7760DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7761{
7762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7763 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7764 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7765 iemFpuMaybePopOne(pFpuCtx);
7766 iemFpuMaybePopOne(pFpuCtx);
7767}
7768
7769
7770DECL_NO_INLINE(IEM_STATIC, void)
7771iemFpuStackPushUnderflow(PVMCPU pVCpu)
7772{
7773 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7774 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7775
7776 if (pFpuCtx->FCW & X86_FCW_IM)
7777 {
7778        /* Masked underflow - Push QNaN. */
7779 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7780 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7781 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7782 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7783 pFpuCtx->FTW |= RT_BIT(iNewTop);
7784 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7785 iemFpuRotateStackPush(pFpuCtx);
7786 }
7787 else
7788 {
7789 /* Exception pending - don't change TOP or the register stack. */
7790 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7791 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7792 }
7793}
7794
7795
7796DECL_NO_INLINE(IEM_STATIC, void)
7797iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7798{
7799 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7800 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7801
7802 if (pFpuCtx->FCW & X86_FCW_IM)
7803 {
7804        /* Masked underflow - Push QNaN. */
7805 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7806 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7807 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7808 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7809 pFpuCtx->FTW |= RT_BIT(iNewTop);
7810 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7811 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7812 iemFpuRotateStackPush(pFpuCtx);
7813 }
7814 else
7815 {
7816 /* Exception pending - don't change TOP or the register stack. */
7817 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7818 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7819 }
7820}
7821
7822
7823/**
7824 * Worker routine for raising an FPU stack overflow exception on a push.
7825 *
7826 * @param pFpuCtx The FPU context.
7827 */
7828IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7829{
7830 if (pFpuCtx->FCW & X86_FCW_IM)
7831 {
7832 /* Masked overflow. */
7833 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7834 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7835 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7836 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7837 pFpuCtx->FTW |= RT_BIT(iNewTop);
7838 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7839 iemFpuRotateStackPush(pFpuCtx);
7840 }
7841 else
7842 {
7843 /* Exception pending - don't change TOP or the register stack. */
7844 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7845 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7846 }
7847}
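
/*
 * The "+ 7" in the push paths above is just a modulo-8 decrement of TOP:
 * pushing a value moves TOP down by one slot before the slot is marked and
 * written.  A minimal sketch with a hypothetical helper name:
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemFpuExampleTopAfterPush(uint16_t u16Fsw)
{
    /* (TOP - 1) mod 8, written as (TOP + 7) & 7 to stay in unsigned math. */
    return (X86_FSW_TOP_GET(u16Fsw) + 7) & X86_FSW_TOP_SMASK;
}
#endif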
7848
7849
7850/**
7851 * Raises an FPU stack overflow exception on a push.
7852 *
7853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7854 */
7855DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7856{
7857 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7858 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7859 iemFpuStackPushOverflowOnly(pFpuCtx);
7860}
7861
7862
7863/**
7864 * Raises an FPU stack overflow exception on a push with a memory operand.
7865 *
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param iEffSeg The effective memory operand selector register.
7868 * @param GCPtrEff The effective memory operand offset.
7869 */
7870DECL_NO_INLINE(IEM_STATIC, void)
7871iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7872{
7873 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7874 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7875 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7876 iemFpuStackPushOverflowOnly(pFpuCtx);
7877}
7878
7879
7880IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7881{
7882 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7883 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7884 if (pFpuCtx->FTW & RT_BIT(iReg))
7885 return VINF_SUCCESS;
7886 return VERR_NOT_FOUND;
7887}
7888
7889
7890IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7891{
7892 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7893 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7894 if (pFpuCtx->FTW & RT_BIT(iReg))
7895 {
7896 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7897 return VINF_SUCCESS;
7898 }
7899 return VERR_NOT_FOUND;
7900}
7901
7902
7903IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7904 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7905{
7906 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7907 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7908 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7909 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7910 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7911 {
7912 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7913 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7914 return VINF_SUCCESS;
7915 }
7916 return VERR_NOT_FOUND;
7917}
7918
7919
7920IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7921{
7922 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7923 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7924 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7925 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7926 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7927 {
7928 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7929 return VINF_SUCCESS;
7930 }
7931 return VERR_NOT_FOUND;
7932}
7933
7934
7935/**
7936 * Updates the FPU exception status after FCW is changed.
7937 *
7938 * @param pFpuCtx The FPU context.
7939 */
7940IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7941{
7942 uint16_t u16Fsw = pFpuCtx->FSW;
7943 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7944 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7945 else
7946 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7947 pFpuCtx->FSW = u16Fsw;
7948}
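
/*
 * A small worked example of the recalculation above, relying on the FSW
 * exception flags and the FCW mask bits sharing bit positions: a pending
 * exception that is masked clears the ES/B summary, unmasking it sets the
 * summary again.  Hypothetical helper name, illustration only.
 */
#if 0 /* illustrative sketch, not built */
static void iemFpuExampleRecalcEs(void)
{
    X86FXSTATE FpuCtx;
    RT_ZERO(FpuCtx);
    FpuCtx.FSW = X86_FSW_IE | X86_FSW_ES | X86_FSW_B;
    FpuCtx.FCW = X86_FCW_IM;                        /* #IE masked */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(!(FpuCtx.FSW & (X86_FSW_ES | X86_FSW_B)));

    FpuCtx.FCW = 0;                                 /* #IE unmasked */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(FpuCtx.FSW & (X86_FSW_ES | X86_FSW_B));
}
#endif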
7949
7950
7951/**
7952 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7953 *
7954 * @returns The full FTW.
7955 * @param pFpuCtx The FPU context.
7956 */
7957IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7958{
7959 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7960 uint16_t u16Ftw = 0;
7961 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7962 for (unsigned iSt = 0; iSt < 8; iSt++)
7963 {
7964 unsigned const iReg = (iSt + iTop) & 7;
7965 if (!(u8Ftw & RT_BIT(iReg)))
7966 u16Ftw |= 3 << (iReg * 2); /* empty */
7967 else
7968 {
7969 uint16_t uTag;
7970 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7971 if (pr80Reg->s.uExponent == 0x7fff)
7972 uTag = 2; /* Exponent is all 1's => Special. */
7973 else if (pr80Reg->s.uExponent == 0x0000)
7974 {
7975 if (pr80Reg->s.u64Mantissa == 0x0000)
7976 uTag = 1; /* All bits are zero => Zero. */
7977 else
7978 uTag = 2; /* Must be special. */
7979 }
7980 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7981 uTag = 0; /* Valid. */
7982 else
7983 uTag = 2; /* Must be special. */
7984
7985            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7986 }
7987 }
7988
7989 return u16Ftw;
7990}
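
/*
 * The per-register classification above follows the standard x87 tag
 * encoding: 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal,
 * unnormal), 11 = empty.  A minimal sketch of the same decision tree for a
 * single occupied register, with a hypothetical helper name:
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemFpuExampleTagForReg(PCRTFLOAT80U pr80Reg)
{
    if (pr80Reg->s.uExponent == 0x7fff)
        return 2;                                           /* all-ones exponent: special */
    if (pr80Reg->s.uExponent == 0x0000)
        return pr80Reg->s.u64Mantissa == 0 ? 1 : 2;         /* zero, or denormal (special) */
    return pr80Reg->s.u64Mantissa & RT_BIT_64(63) ? 0 : 2;  /* J bit set: valid, else special */
}
#endif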
7991
7992
7993/**
7994 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7995 *
7996 * @returns The compressed FTW.
7997 * @param u16FullFtw The full FTW to convert.
7998 */
7999IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
8000{
8001 uint8_t u8Ftw = 0;
8002 for (unsigned i = 0; i < 8; i++)
8003 {
8004 if ((u16FullFtw & 3) != 3 /*empty*/)
8005 u8Ftw |= RT_BIT(i);
8006 u16FullFtw >>= 2;
8007 }
8008
8009 return u8Ftw;
8010}
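
/*
 * A short worked example of the conversion above: with the slots at indexes
 * 0 and 1 holding valid values (tag 00) and the rest empty (tag 11), the
 * full FTW is 0xfff0 and the compressed form has exactly bits 0 and 1 set.
 * Hypothetical test function, illustration only.
 */
#if 0 /* illustrative sketch, not built */
static void iemFpuExampleCompressFtw(void)
{
    Assert(iemFpuCompressFtw(UINT16_C(0xfff0)) == 0x03);    /* slots 0 and 1 occupied */
    Assert(iemFpuCompressFtw(UINT16_C(0xffff)) == 0x00);    /* everything empty */
}
#endif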
8011
8012/** @} */
8013
8014
8015/** @name Memory access.
8016 *
8017 * @{
8018 */
8019
8020
8021/**
8022 * Updates the IEMCPU::cbWritten counter if applicable.
8023 *
8024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8025 * @param fAccess The access being accounted for.
8026 * @param cbMem The access size.
8027 */
8028DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8029{
8030 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8031 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8032 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8033}
8034
8035
8036/**
8037 * Checks if the given segment can be written to, raising the appropriate
8038 * exception if not.
8039 *
8040 * @returns VBox strict status code.
8041 *
8042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8043 * @param pHid Pointer to the hidden register.
8044 * @param iSegReg The register number.
8045 * @param pu64BaseAddr Where to return the base address to use for the
8046 * segment. (In 64-bit code it may differ from the
8047 * base in the hidden segment.)
8048 */
8049IEM_STATIC VBOXSTRICTRC
8050iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8051{
8052 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8053
8054 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8055 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8056 else
8057 {
8058 if (!pHid->Attr.n.u1Present)
8059 {
8060 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8061 AssertRelease(uSel == 0);
8062 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8063 return iemRaiseGeneralProtectionFault0(pVCpu);
8064 }
8065
8066 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8067 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8068 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8069 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8070 *pu64BaseAddr = pHid->u64Base;
8071 }
8072 return VINF_SUCCESS;
8073}
8074
8075
8076/**
8077 * Checks if the given segment can be read from, raising the appropriate
8078 * exception if not.
8079 *
8080 * @returns VBox strict status code.
8081 *
8082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8083 * @param pHid Pointer to the hidden register.
8084 * @param iSegReg The register number.
8085 * @param pu64BaseAddr Where to return the base address to use for the
8086 * segment. (In 64-bit code it may differ from the
8087 * base in the hidden segment.)
8088 */
8089IEM_STATIC VBOXSTRICTRC
8090iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8091{
8092 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8093
8094 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8095 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8096 else
8097 {
8098 if (!pHid->Attr.n.u1Present)
8099 {
8100 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8101 AssertRelease(uSel == 0);
8102 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8103 return iemRaiseGeneralProtectionFault0(pVCpu);
8104 }
8105
8106 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8107 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8108 *pu64BaseAddr = pHid->u64Base;
8109 }
8110 return VINF_SUCCESS;
8111}
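
/*
 * The two checks above boil down to simple predicates on the descriptor type
 * bits in the hidden attributes: writing needs a writable data segment,
 * while reading only excludes execute-only code segments.  Minimal sketches
 * with hypothetical helper names:
 */
#if 0 /* illustrative sketch, not built */
static bool iemExampleSegIsWritableData(PCCPUMSELREGHID pHid)
{
    return !(pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
        &&  (pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE);
}

static bool iemExampleSegIsReadable(PCCPUMSELREGHID pHid)
{
    /* Everything except an execute-only code segment may be read. */
    return (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) != X86_SEL_TYPE_CODE;
}
#endif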
8112
8113
8114/**
8115 * Applies the segment limit, base and attributes.
8116 *
8117 * This may raise a \#GP or \#SS.
8118 *
8119 * @returns VBox strict status code.
8120 *
8121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8122 * @param fAccess The kind of access which is being performed.
8123 * @param iSegReg The index of the segment register to apply.
8124 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8125 * TSS, ++).
8126 * @param cbMem The access size.
8127 * @param pGCPtrMem Pointer to the guest memory address to apply
8128 * segmentation to. Input and output parameter.
8129 */
8130IEM_STATIC VBOXSTRICTRC
8131iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8132{
8133 if (iSegReg == UINT8_MAX)
8134 return VINF_SUCCESS;
8135
8136 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8137 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8138 switch (pVCpu->iem.s.enmCpuMode)
8139 {
8140 case IEMMODE_16BIT:
8141 case IEMMODE_32BIT:
8142 {
8143 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8144 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8145
8146 if ( pSel->Attr.n.u1Present
8147 && !pSel->Attr.n.u1Unusable)
8148 {
8149 Assert(pSel->Attr.n.u1DescType);
8150 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8151 {
8152 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8153 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8154 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8155
8156 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8157 {
8158 /** @todo CPL check. */
8159 }
8160
8161 /*
8162 * There are two kinds of data selectors, normal and expand down.
8163 */
8164 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8165 {
8166 if ( GCPtrFirst32 > pSel->u32Limit
8167 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8168 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8169 }
8170 else
8171 {
8172 /*
8173 * The upper boundary is defined by the B bit, not the G bit!
8174 */
8175 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8176 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8177 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8178 }
8179 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8180 }
8181 else
8182 {
8183
8184 /*
8185             * Code selectors can usually be used to read thru; writing is
8186             * only permitted in real and V8086 mode.
8187 */
8188 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8189 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8190 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8191 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8192 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8193
8194 if ( GCPtrFirst32 > pSel->u32Limit
8195 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8196 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8197
8198 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8199 {
8200 /** @todo CPL check. */
8201 }
8202
8203 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8204 }
8205 }
8206 else
8207 return iemRaiseGeneralProtectionFault0(pVCpu);
8208 return VINF_SUCCESS;
8209 }
8210
8211 case IEMMODE_64BIT:
8212 {
8213 RTGCPTR GCPtrMem = *pGCPtrMem;
8214 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8215 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8216
8217 Assert(cbMem >= 1);
8218 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8219 return VINF_SUCCESS;
8220 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8221 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8222 return iemRaiseGeneralProtectionFault0(pVCpu);
8223 }
8224
8225 default:
8226 AssertFailedReturn(VERR_IEM_IPE_7);
8227 }
8228}
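
/*
 * The 16/32-bit limit checks above differ only in the expand-down (E) bit:
 * an expand-up data segment allows offsets [0, limit], while an expand-down
 * one allows (limit, 0xffff] or (limit, 0xffffffff] depending on the B bit.
 * A minimal sketch of the two range tests, hypothetical helper name:
 */
#if 0 /* illustrative sketch, not built */
static bool iemExampleSegLimitOk(PCCPUMSELREGHID pSel, uint32_t GCPtrFirst32, uint32_t GCPtrLast32)
{
    if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
        return GCPtrFirst32 <= pSel->u32Limit                   /* expand up */
            && GCPtrLast32  <= pSel->u32Limit;
    return GCPtrFirst32 > pSel->u32Limit                        /* expand down */
        && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff));
}
#endif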
8229
8230
8231/**
8232 * Translates a virtual address to a physical address and checks if we
8233 * can access the page as specified.
8234 *
8235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8236 * @param GCPtrMem The virtual address.
8237 * @param fAccess The intended access.
8238 * @param pGCPhysMem Where to return the physical address.
8239 */
8240IEM_STATIC VBOXSTRICTRC
8241iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8242{
8243 /** @todo Need a different PGM interface here. We're currently using
8244 * generic / REM interfaces. this won't cut it for R0 & RC. */
8245 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8246 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8247 RTGCPHYS GCPhys;
8248 uint64_t fFlags;
8249 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8250 if (RT_FAILURE(rc))
8251 {
8252 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8253 /** @todo Check unassigned memory in unpaged mode. */
8254 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8255 *pGCPhysMem = NIL_RTGCPHYS;
8256 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8257 }
8258
8259 /* If the page is writable and does not have the no-exec bit set, all
8260 access is allowed. Otherwise we'll have to check more carefully... */
8261 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8262 {
8263 /* Write to read only memory? */
8264 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8265 && !(fFlags & X86_PTE_RW)
8266 && ( (pVCpu->iem.s.uCpl == 3
8267 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8268 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8269 {
8270 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8271 *pGCPhysMem = NIL_RTGCPHYS;
8272 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8273 }
8274
8275 /* Kernel memory accessed by userland? */
8276 if ( !(fFlags & X86_PTE_US)
8277 && pVCpu->iem.s.uCpl == 3
8278 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8279 {
8280 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8281 *pGCPhysMem = NIL_RTGCPHYS;
8282 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8283 }
8284
8285 /* Executing non-executable memory? */
8286 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8287 && (fFlags & X86_PTE_PAE_NX)
8288 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8289 {
8290 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8291 *pGCPhysMem = NIL_RTGCPHYS;
8292 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8293 VERR_ACCESS_DENIED);
8294 }
8295 }
8296
8297 /*
8298 * Set the dirty / access flags.
8299 * ASSUMES this is set when the address is translated rather than on commit...
8300 */
8301 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8302 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8303 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8304 {
8305 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8306 AssertRC(rc2);
8307 }
8308
8309 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8310 *pGCPhysMem = GCPhys;
8311 return VINF_SUCCESS;
8312}
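
/*
 * The permission tests above can be folded into one predicate over the PTE
 * flags: writes need R/W unless this is a supervisor access with CR0.WP
 * clear, CPL 3 non-system accesses need U/S, and instruction fetches must
 * not hit an NX page while EFER.NXE is set.  A minimal sketch with a
 * hypothetical helper name:
 */
#if 0 /* illustrative sketch, not built */
static bool iemExamplePageAccessOk(uint64_t fPteFlags, uint32_t fAccess, uint8_t uCpl, bool fWp, bool fNxe)
{
    bool const fUserAccess = uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS);
    if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
        && !(fPteFlags & X86_PTE_RW)
        && (fUserAccess || fWp))
        return false;                               /* write to a read-only page */
    if (   !(fPteFlags & X86_PTE_US)
        && fUserAccess)
        return false;                               /* user access to a supervisor page */
    if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
        && (fPteFlags & X86_PTE_PAE_NX)
        && fNxe)
        return false;                               /* instruction fetch from an NX page */
    return true;
}
#endif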
8313
8314
8315
8316/**
8317 * Maps a physical page.
8318 *
8319 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8321 * @param GCPhysMem The physical address.
8322 * @param fAccess The intended access.
8323 * @param ppvMem Where to return the mapping address.
8324 * @param pLock The PGM lock.
8325 */
8326IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8327{
8328#ifdef IEM_LOG_MEMORY_WRITES
8329 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8330 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8331#endif
8332
8333 /** @todo This API may require some improving later. A private deal with PGM
8334 * regarding locking and unlocking needs to be struck. A couple of TLBs
8335 * living in PGM, but with publicly accessible inlined access methods
8336 * could perhaps be an even better solution. */
8337 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8338 GCPhysMem,
8339 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8340 pVCpu->iem.s.fBypassHandlers,
8341 ppvMem,
8342 pLock);
8343 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8344 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8345
8346 return rc;
8347}
8348
8349
8350/**
8351 * Unmaps a page previously mapped by iemMemPageMap.
8352 *
8353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8354 * @param GCPhysMem The physical address.
8355 * @param fAccess The intended access.
8356 * @param pvMem What iemMemPageMap returned.
8357 * @param pLock The PGM lock.
8358 */
8359DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8360{
8361 NOREF(pVCpu);
8362 NOREF(GCPhysMem);
8363 NOREF(fAccess);
8364 NOREF(pvMem);
8365 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8366}
8367
8368
8369/**
8370 * Looks up a memory mapping entry.
8371 *
8372 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8374 * @param pvMem The memory address.
8375 * @param   fAccess             The access type and purpose to match
8375 *                              (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
8376 */
8377DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8378{
8379 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8380 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8381 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8382 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8383 return 0;
8384 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8385 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8386 return 1;
8387 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8388 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8389 return 2;
8390 return VERR_NOT_FOUND;
8391}
8392
8393
8394/**
8395 * Finds a free memmap entry when using iNextMapping doesn't work.
8396 *
8397 * @returns Memory mapping index, 1024 on failure.
8398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8399 */
8400IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8401{
8402 /*
8403 * The easy case.
8404 */
8405 if (pVCpu->iem.s.cActiveMappings == 0)
8406 {
8407 pVCpu->iem.s.iNextMapping = 1;
8408 return 0;
8409 }
8410
8411 /* There should be enough mappings for all instructions. */
8412 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8413
8414 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8415 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8416 return i;
8417
8418 AssertFailedReturn(1024);
8419}
8420
8421
8422/**
8423 * Commits a bounce buffer that needs writing back and unmaps it.
8424 *
8425 * @returns Strict VBox status code.
8426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8427 * @param iMemMap The index of the buffer to commit.
8428 * @param   fPostponeFail       Whether we can postpone write failures to ring-3.
8429 * Always false in ring-3, obviously.
8430 */
8431IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8432{
8433 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8434 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8435#ifdef IN_RING3
8436 Assert(!fPostponeFail);
8437 RT_NOREF_PV(fPostponeFail);
8438#endif
8439
8440 /*
8441 * Do the writing.
8442 */
8443 PVM pVM = pVCpu->CTX_SUFF(pVM);
8444 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8445 {
8446 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8447 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8448 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8449 if (!pVCpu->iem.s.fBypassHandlers)
8450 {
8451 /*
8452 * Carefully and efficiently dealing with access handler return
8453 * codes make this a little bloated.
8454         * codes makes this a little bloated.
8455 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8457 pbBuf,
8458 cbFirst,
8459 PGMACCESSORIGIN_IEM);
8460 if (rcStrict == VINF_SUCCESS)
8461 {
8462 if (cbSecond)
8463 {
8464 rcStrict = PGMPhysWrite(pVM,
8465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8466 pbBuf + cbFirst,
8467 cbSecond,
8468 PGMACCESSORIGIN_IEM);
8469 if (rcStrict == VINF_SUCCESS)
8470 { /* nothing */ }
8471 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8472 {
8473 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8476 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8477 }
8478#ifndef IN_RING3
8479 else if (fPostponeFail)
8480 {
8481 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8484 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8485 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8486 return iemSetPassUpStatus(pVCpu, rcStrict);
8487 }
8488#endif
8489 else
8490 {
8491 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8494 return rcStrict;
8495 }
8496 }
8497 }
8498 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8499 {
8500 if (!cbSecond)
8501 {
8502 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8504 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8505 }
8506 else
8507 {
8508 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8509 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8510 pbBuf + cbFirst,
8511 cbSecond,
8512 PGMACCESSORIGIN_IEM);
8513 if (rcStrict2 == VINF_SUCCESS)
8514 {
8515 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8518 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8519 }
8520 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8521 {
8522 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8524 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8525 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8526 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8527 }
8528#ifndef IN_RING3
8529 else if (fPostponeFail)
8530 {
8531 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8532 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8533 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8534 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8535 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8536 return iemSetPassUpStatus(pVCpu, rcStrict);
8537 }
8538#endif
8539 else
8540 {
8541 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8542 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8543 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8544 return rcStrict2;
8545 }
8546 }
8547 }
8548#ifndef IN_RING3
8549 else if (fPostponeFail)
8550 {
8551 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8552 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8553 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8554 if (!cbSecond)
8555 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8556 else
8557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8558 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8559 return iemSetPassUpStatus(pVCpu, rcStrict);
8560 }
8561#endif
8562 else
8563 {
8564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8566 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8567 return rcStrict;
8568 }
8569 }
8570 else
8571 {
8572 /*
8573 * No access handlers, much simpler.
8574 */
8575 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8576 if (RT_SUCCESS(rc))
8577 {
8578 if (cbSecond)
8579 {
8580 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8581 if (RT_SUCCESS(rc))
8582 { /* likely */ }
8583 else
8584 {
8585 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8586 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8587 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8588 return rc;
8589 }
8590 }
8591 }
8592 else
8593 {
8594 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8595 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8596 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8597 return rc;
8598 }
8599 }
8600 }
8601
8602#if defined(IEM_LOG_MEMORY_WRITES)
8603 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8604 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8605 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8606 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8607 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8608 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8609
8610 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8611 g_cbIemWrote = cbWrote;
8612 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8613#endif
8614
8615 /*
8616 * Free the mapping entry.
8617 */
8618 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8619 Assert(pVCpu->iem.s.cActiveMappings != 0);
8620 pVCpu->iem.s.cActiveMappings--;
8621 return VINF_SUCCESS;
8622}
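
/*
 * The postponement bookkeeping above boils down to remembering which of the
 * two physical chunks still needs writing and forcing a trip to ring-3 via
 * VMCPU_FF_IEM.  A minimal sketch of the flag selection, hypothetical helper
 * name:
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemExamplePostponedWriteFlags(bool fFirstWriteFailed, bool fHasSecondChunk)
{
    if (!fFirstWriteFailed)
        return IEM_ACCESS_PENDING_R3_WRITE_2ND;     /* only the second chunk is left */
    return fHasSecondChunk
         ? IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND
         : IEM_ACCESS_PENDING_R3_WRITE_1ST;
}
#endif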
8623
8624
8625/**
8626 * iemMemMap worker that deals with a request crossing pages.
8627 */
8628IEM_STATIC VBOXSTRICTRC
8629iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8630{
8631 /*
8632 * Do the address translations.
8633 */
8634 RTGCPHYS GCPhysFirst;
8635 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8636 if (rcStrict != VINF_SUCCESS)
8637 return rcStrict;
8638
8639 RTGCPHYS GCPhysSecond;
8640 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8641 fAccess, &GCPhysSecond);
8642 if (rcStrict != VINF_SUCCESS)
8643 return rcStrict;
8644 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8645
8646 PVM pVM = pVCpu->CTX_SUFF(pVM);
8647
8648 /*
8649 * Read in the current memory content if it's a read, execute or partial
8650 * write access.
8651 */
8652 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8653 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8654 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8655
8656 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8657 {
8658 if (!pVCpu->iem.s.fBypassHandlers)
8659 {
8660 /*
8661             * Must carefully deal with access handler status codes here; this
8662 * makes the code a bit bloated.
8663 */
8664 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8665 if (rcStrict == VINF_SUCCESS)
8666 {
8667 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8668 if (rcStrict == VINF_SUCCESS)
8669 { /*likely */ }
8670 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8671 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8672 else
8673 {
8674 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8675 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8676 return rcStrict;
8677 }
8678 }
8679 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8680 {
8681 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8682 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8683 {
8684 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8685 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8686 }
8687 else
8688 {
8689 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8690                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8691 return rcStrict2;
8692 }
8693 }
8694 else
8695 {
8696 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8697 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8698 return rcStrict;
8699 }
8700 }
8701 else
8702 {
8703 /*
8704             * No informational status codes here, much more straightforward.
8705 */
8706 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8707 if (RT_SUCCESS(rc))
8708 {
8709 Assert(rc == VINF_SUCCESS);
8710 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8711 if (RT_SUCCESS(rc))
8712 Assert(rc == VINF_SUCCESS);
8713 else
8714 {
8715 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8716 return rc;
8717 }
8718 }
8719 else
8720 {
8721 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8722 return rc;
8723 }
8724 }
8725 }
8726#ifdef VBOX_STRICT
8727 else
8728 memset(pbBuf, 0xcc, cbMem);
8729 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8730 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8731#endif
8732
8733 /*
8734 * Commit the bounce buffer entry.
8735 */
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8737 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8738 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8739 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8740 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8741 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8742 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8743 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8744 pVCpu->iem.s.cActiveMappings++;
8745
8746 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8747 *ppvMem = pbBuf;
8748 return VINF_SUCCESS;
8749}
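
/*
 * The split above is plain offset arithmetic: everything up to the end of
 * the first page goes into the first chunk and the remainder into the
 * second.  A minimal sketch, hypothetical helper name (the caller guarantees
 * that the access really does cross a page boundary):
 */
#if 0 /* illustrative sketch, not built */
static void iemExampleSplitCrossPage(RTGCPHYS GCPhysFirst, size_t cbMem,
                                     uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    uint32_t const cbFirstPage = PAGE_SIZE - (uint32_t)(GCPhysFirst & PAGE_OFFSET_MASK);
    *pcbFirstPage  = cbFirstPage;
    *pcbSecondPage = (uint32_t)(cbMem - cbFirstPage);
}
#endif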
8750
8751
8752/**
8753 * iemMemMap worker that deals with iemMemPageMap failures.
8754 */
8755IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8756 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8757{
8758 /*
8759 * Filter out conditions we can handle and the ones which shouldn't happen.
8760 */
8761 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8762 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8763 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8764 {
8765 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8766 return rcMap;
8767 }
8768 pVCpu->iem.s.cPotentialExits++;
8769
8770 /*
8771 * Read in the current memory content if it's a read, execute or partial
8772 * write access.
8773 */
8774 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8775 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8776 {
8777 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8778 memset(pbBuf, 0xff, cbMem);
8779 else
8780 {
8781 int rc;
8782 if (!pVCpu->iem.s.fBypassHandlers)
8783 {
8784 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8785 if (rcStrict == VINF_SUCCESS)
8786 { /* nothing */ }
8787 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8788 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8789 else
8790 {
8791 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8792 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8793 return rcStrict;
8794 }
8795 }
8796 else
8797 {
8798 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8799 if (RT_SUCCESS(rc))
8800 { /* likely */ }
8801 else
8802 {
8803 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8804 GCPhysFirst, rc));
8805 return rc;
8806 }
8807 }
8808 }
8809 }
8810#ifdef VBOX_STRICT
8811    else
8812        memset(pbBuf, 0xcc, cbMem);
8815    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8816        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8817#endif
8818
8819 /*
8820 * Commit the bounce buffer entry.
8821 */
8822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8823 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8824 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8825 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8826 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8827 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8828 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8829 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8830 pVCpu->iem.s.cActiveMappings++;
8831
8832 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8833 *ppvMem = pbBuf;
8834 return VINF_SUCCESS;
8835}
8836
8837
8838
8839/**
8840 * Maps the specified guest memory for the given kind of access.
8841 *
8842 * This may be using bounce buffering of the memory if it's crossing a page
8843 * boundary or if there is an access handler installed for any of it. Because
8844 * of lock prefix guarantees, we're in for some extra clutter when this
8845 * happens.
8846 *
8847 * This may raise a \#GP, \#SS, \#PF or \#AC.
8848 *
8849 * @returns VBox strict status code.
8850 *
8851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8852 * @param ppvMem Where to return the pointer to the mapped
8853 * memory.
8854 * @param cbMem The number of bytes to map. This is usually 1,
8855 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8856 * string operations it can be up to a page.
8857 * @param iSegReg The index of the segment register to use for
8858 * this access. The base and limits are checked.
8859 * Use UINT8_MAX to indicate that no segmentation
8860 * is required (for IDT, GDT and LDT accesses).
8861 * @param GCPtrMem The address of the guest memory.
8862 * @param fAccess How the memory is being accessed. The
8863 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8864 * how to map the memory, while the
8865 * IEM_ACCESS_WHAT_XXX bit is used when raising
8866 * exceptions.
8867 */
8868IEM_STATIC VBOXSTRICTRC
8869iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8870{
8871 /*
8872 * Check the input and figure out which mapping entry to use.
8873 */
8874 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8875 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8876 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8877
8878 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8879 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8880 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8881 {
8882 iMemMap = iemMemMapFindFree(pVCpu);
8883 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8884 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8885 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8886 pVCpu->iem.s.aMemMappings[2].fAccess),
8887 VERR_IEM_IPE_9);
8888 }
8889
8890 /*
8891 * Map the memory, checking that we can actually access it. If something
8892 * slightly complicated happens, fall back on bounce buffering.
8893 */
8894 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8895 if (rcStrict != VINF_SUCCESS)
8896 return rcStrict;
8897
8898 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8899 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8900
8901 RTGCPHYS GCPhysFirst;
8902 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8903 if (rcStrict != VINF_SUCCESS)
8904 return rcStrict;
8905
8906 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8907 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8908 if (fAccess & IEM_ACCESS_TYPE_READ)
8909 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8910
8911 void *pvMem;
8912 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8913 if (rcStrict != VINF_SUCCESS)
8914 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8915
8916 /*
8917 * Fill in the mapping table entry.
8918 */
8919 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8920 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8921 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8922 pVCpu->iem.s.cActiveMappings++;
8923
8924 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8925 *ppvMem = pvMem;
8926
8927 return VINF_SUCCESS;
8928}
8929
8930
8931/**
8932 * Commits the guest memory if bounce buffered and unmaps it.
8933 *
8934 * @returns Strict VBox status code.
8935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8936 * @param pvMem The mapping.
8937 * @param fAccess The kind of access.
8938 */
8939IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8940{
8941 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8942 AssertReturn(iMemMap >= 0, iMemMap);
8943
8944 /* If it's bounce buffered, we may need to write back the buffer. */
8945 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8946 {
8947 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8948 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8949 }
8950 /* Otherwise unlock it. */
8951 else
8952 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8953
8954 /* Free the entry. */
8955 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8956 Assert(pVCpu->iem.s.cActiveMappings != 0);
8957 pVCpu->iem.s.cActiveMappings--;
8958 return VINF_SUCCESS;
8959}
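
/*
 * The usual calling pattern for the pair above is map, access, then commit
 * and unmap.  The fetch helpers further down follow it for reads; a store
 * would look much the same with IEM_ACCESS_DATA_W, roughly as in this
 * sketch (hypothetical helper name):
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemExampleStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}
#endif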
8960
8961#ifdef IEM_WITH_SETJMP
8962
8963/**
8964 * Maps the specified guest memory for the given kind of access, longjmp on
8965 * error.
8966 *
8967 * This may be using bounce buffering of the memory if it's crossing a page
8968 * boundary or if there is an access handler installed for any of it. Because
8969 * of lock prefix guarantees, we're in for some extra clutter when this
8970 * happens.
8971 *
8972 * This may raise a \#GP, \#SS, \#PF or \#AC.
8973 *
8974 * @returns Pointer to the mapped memory.
8975 *
8976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8977 * @param cbMem The number of bytes to map. This is usually 1,
8978 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8979 * string operations it can be up to a page.
8980 * @param iSegReg The index of the segment register to use for
8981 * this access. The base and limits are checked.
8982 * Use UINT8_MAX to indicate that no segmentation
8983 * is required (for IDT, GDT and LDT accesses).
8984 * @param GCPtrMem The address of the guest memory.
8985 * @param fAccess How the memory is being accessed. The
8986 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8987 * how to map the memory, while the
8988 * IEM_ACCESS_WHAT_XXX bit is used when raising
8989 * exceptions.
8990 */
8991IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8992{
8993 /*
8994 * Check the input and figure out which mapping entry to use.
8995 */
8996 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8997 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8998 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8999
9000 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9001 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9002 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9003 {
9004 iMemMap = iemMemMapFindFree(pVCpu);
9005 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9006 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9007 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9008 pVCpu->iem.s.aMemMappings[2].fAccess),
9009 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9010 }
9011
9012 /*
9013 * Map the memory, checking that we can actually access it. If something
9014 * slightly complicated happens, fall back on bounce buffering.
9015 */
9016 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9017 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9018 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9019
9020 /* Crossing a page boundary? */
9021 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9022 { /* No (likely). */ }
9023 else
9024 {
9025 void *pvMem;
9026 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9027 if (rcStrict == VINF_SUCCESS)
9028 return pvMem;
9029 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9030 }
9031
9032 RTGCPHYS GCPhysFirst;
9033 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9034 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9035 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9036
9037 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9038 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9039 if (fAccess & IEM_ACCESS_TYPE_READ)
9040 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9041
9042 void *pvMem;
9043 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9044 if (rcStrict == VINF_SUCCESS)
9045 { /* likely */ }
9046 else
9047 {
9048 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9049 if (rcStrict == VINF_SUCCESS)
9050 return pvMem;
9051 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9052 }
9053
9054 /*
9055 * Fill in the mapping table entry.
9056 */
9057 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9058 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9059 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9060 pVCpu->iem.s.cActiveMappings++;
9061
9062 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9063 return pvMem;
9064}
9065
9066
9067/**
9068 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9069 *
9070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9071 * @param pvMem The mapping.
9072 * @param fAccess The kind of access.
9073 */
9074IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9075{
9076 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9077 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9078
9079 /* If it's bounce buffered, we may need to write back the buffer. */
9080 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9081 {
9082 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9083 {
9084 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9085 if (rcStrict == VINF_SUCCESS)
9086 return;
9087 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9088 }
9089 }
9090 /* Otherwise unlock it. */
9091 else
9092 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9093
9094 /* Free the entry. */
9095 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9096 Assert(pVCpu->iem.s.cActiveMappings != 0);
9097 pVCpu->iem.s.cActiveMappings--;
9098}
9099
9100#endif /* IEM_WITH_SETJMP */
9101
9102#ifndef IN_RING3
9103/**
9104 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9105 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9106 *
9107 * Allows the instruction to be completed and retired, while the IEM user will
9108 * return to ring-3 immediately afterwards and do the postponed writes there.
9109 *
9110 * @returns VBox status code (no strict statuses). Caller must check
9111 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9113 * @param pvMem The mapping.
9114 * @param fAccess The kind of access.
9115 */
9116IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9117{
9118 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9119 AssertReturn(iMemMap >= 0, iMemMap);
9120
9121 /* If it's bounce buffered, we may need to write back the buffer. */
9122 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9123 {
9124 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9125 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9126 }
9127 /* Otherwise unlock it. */
9128 else
9129 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9130
9131 /* Free the entry. */
9132 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9133 Assert(pVCpu->iem.s.cActiveMappings != 0);
9134 pVCpu->iem.s.cActiveMappings--;
9135 return VINF_SUCCESS;
9136}
9137#endif
9138
9139
9140/**
9141 * Rolls back mappings, releasing page locks and such.
9142 *
9143 * The caller shall only call this after checking cActiveMappings.
9144 *
9146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9147 */
9148IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9149{
9150 Assert(pVCpu->iem.s.cActiveMappings > 0);
9151
9152 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9153 while (iMemMap-- > 0)
9154 {
9155 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9156 if (fAccess != IEM_ACCESS_INVALID)
9157 {
9158 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9159 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9160 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9161 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9162 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9163 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9164 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9165 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9166 pVCpu->iem.s.cActiveMappings--;
9167 }
9168 }
9169}
9170
9171
9172/**
9173 * Fetches a data byte.
9174 *
9175 * @returns Strict VBox status code.
9176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9177 * @param pu8Dst Where to return the byte.
9178 * @param iSegReg The index of the segment register to use for
9179 * this access. The base and limits are checked.
9180 * @param GCPtrMem The address of the guest memory.
9181 */
9182IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9183{
9184 /* The lazy approach for now... */
9185 uint8_t const *pu8Src;
9186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9187 if (rc == VINF_SUCCESS)
9188 {
9189 *pu8Dst = *pu8Src;
9190 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9191 }
9192 return rc;
9193}
9194
9195
9196#ifdef IEM_WITH_SETJMP
9197/**
9198 * Fetches a data byte, longjmp on error.
9199 *
9200 * @returns The byte.
9201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9202 * @param iSegReg The index of the segment register to use for
9203 * this access. The base and limits are checked.
9204 * @param GCPtrMem The address of the guest memory.
9205 */
9206DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9207{
9208 /* The lazy approach for now... */
9209 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9210 uint8_t const bRet = *pu8Src;
9211 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9212 return bRet;
9213}
9214#endif /* IEM_WITH_SETJMP */
9215
9216
9217/**
9218 * Fetches a data word.
9219 *
9220 * @returns Strict VBox status code.
9221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9222 * @param pu16Dst Where to return the word.
9223 * @param iSegReg The index of the segment register to use for
9224 * this access. The base and limits are checked.
9225 * @param GCPtrMem The address of the guest memory.
9226 */
9227IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9228{
9229 /* The lazy approach for now... */
9230 uint16_t const *pu16Src;
9231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9232 if (rc == VINF_SUCCESS)
9233 {
9234 *pu16Dst = *pu16Src;
9235 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9236 }
9237 return rc;
9238}
9239
9240
9241#ifdef IEM_WITH_SETJMP
9242/**
9243 * Fetches a data word, longjmp on error.
9244 *
9245 * @returns The word.
9246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9247 * @param iSegReg The index of the segment register to use for
9248 * this access. The base and limits are checked.
9249 * @param GCPtrMem The address of the guest memory.
9250 */
9251DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9252{
9253 /* The lazy approach for now... */
9254 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9255 uint16_t const u16Ret = *pu16Src;
9256 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9257 return u16Ret;
9258}
9259#endif
9260
9261
9262/**
9263 * Fetches a data dword.
9264 *
9265 * @returns Strict VBox status code.
9266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9267 * @param pu32Dst Where to return the dword.
9268 * @param iSegReg The index of the segment register to use for
9269 * this access. The base and limits are checked.
9270 * @param GCPtrMem The address of the guest memory.
9271 */
9272IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9273{
9274 /* The lazy approach for now... */
9275 uint32_t const *pu32Src;
9276 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9277 if (rc == VINF_SUCCESS)
9278 {
9279 *pu32Dst = *pu32Src;
9280 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9281 }
9282 return rc;
9283}
9284
9285
9286#ifdef IEM_WITH_SETJMP
9287
9288IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9289{
9290 Assert(cbMem >= 1);
9291 Assert(iSegReg < X86_SREG_COUNT);
9292
9293 /*
9294 * 64-bit mode is simpler.
9295 */
9296 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9297 {
9298 if (iSegReg >= X86_SREG_FS)
9299 {
9300 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9301 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9302 GCPtrMem += pSel->u64Base;
9303 }
9304
9305 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9306 return GCPtrMem;
9307 }
9308 /*
9309 * 16-bit and 32-bit segmentation.
9310 */
9311 else
9312 {
9313 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9314 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9315 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9316 == X86DESCATTR_P /* data, expand up */
9317 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9318 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9319 {
9320 /* expand up */
9321 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9322 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9323 && GCPtrLast32 > (uint32_t)GCPtrMem))
9324 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9325 }
9326 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9327 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9328 {
9329 /* expand down */
9330 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9331 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9332 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9333 && GCPtrLast32 > (uint32_t)GCPtrMem))
9334 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9335 }
9336 else
9337 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9338 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9339 }
9340 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9341}
9342
9343
9344IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9345{
9346 Assert(cbMem >= 1);
9347 Assert(iSegReg < X86_SREG_COUNT);
9348
9349 /*
9350 * 64-bit mode is simpler.
9351 */
9352 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9353 {
9354 if (iSegReg >= X86_SREG_FS)
9355 {
9356 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9357 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9358 GCPtrMem += pSel->u64Base;
9359 }
9360
9361 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9362 return GCPtrMem;
9363 }
9364 /*
9365 * 16-bit and 32-bit segmentation.
9366 */
9367 else
9368 {
9369 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9370 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9371 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9372 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9373 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9374 {
9375 /* expand up */
9376 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9377 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9378 && GCPtrLast32 > (uint32_t)GCPtrMem))
9379 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9380 }
9381 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9382 {
9383 /* expand down */
9384 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9385 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9386 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9387 && GCPtrLast32 > (uint32_t)GCPtrMem))
9388 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9389 }
9390 else
9391 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9392 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9393 }
9394 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9395}
9396
9397
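/*
 * For illustration only: the architectural data-segment limit rules that the
 * two helpers above enforce before adding the segment base, written as a
 * standalone predicate.  This is a simplified sketch (hypothetical helper,
 * plain C99, not part of IEM); the real code above additionally folds the
 * present/unusable/type checks into a single attribute comparison.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool isWithinSegLimit(uint32_t off, uint32_t cbMem, uint32_t uLimit,
 *                                bool fExpandDown, bool fDefBig)
 *   {
 *       uint32_t const offLast = off + cbMem - 1;
 *       if (offLast < off)                  // wrap-around: always out of bounds
 *           return false;
 *       if (!fExpandDown)                   // expand-up: valid offsets are 0..uLimit
 *           return offLast <= uLimit;
 *       // expand-down: valid offsets are uLimit+1 .. 0xffff (0xffffffff when D/B=1)
 *       uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
 *       return off > uLimit && offLast <= offMax;
 *   }
 */
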
9398/**
9399 * Fetches a data dword, longjmp on error, fallback/safe version.
9400 *
9401 * @returns The dword
9402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9403 * @param iSegReg The index of the segment register to use for
9404 * this access. The base and limits are checked.
9405 * @param GCPtrMem The address of the guest memory.
9406 */
9407IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9408{
9409 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9410 uint32_t const u32Ret = *pu32Src;
9411 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9412 return u32Ret;
9413}
9414
9415
9416/**
9417 * Fetches a data dword, longjmp on error.
9418 *
9419 * @returns The dword
9420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 */
9425DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9426{
9427# ifdef IEM_WITH_DATA_TLB
9428 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9429 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9430 {
9431 /// @todo more later.
9432 }
9433
9434 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9435# else
9436 /* The lazy approach. */
9437 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9438 uint32_t const u32Ret = *pu32Src;
9439 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9440 return u32Ret;
9441# endif
9442}
9443#endif
9444
9445
9446#ifdef SOME_UNUSED_FUNCTION
9447/**
9448 * Fetches a data dword and sign extends it to a qword.
9449 *
9450 * @returns Strict VBox status code.
9451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9452 * @param pu64Dst Where to return the sign extended value.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 */
9457IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9458{
9459 /* The lazy approach for now... */
9460 int32_t const *pi32Src;
9461 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9462 if (rc == VINF_SUCCESS)
9463 {
9464 *pu64Dst = *pi32Src;
9465 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9466 }
9467#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9468 else
9469 *pu64Dst = 0;
9470#endif
9471 return rc;
9472}
9473#endif
9474
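/*
 * Note on the sign extension above: assigning the int32_t source to the
 * uint64_t destination relies on C's integer conversion rules, which for a
 * negative value produce the same bit pattern as an explicit sign extension.
 * A minimal standalone illustration (not IEM code):
 *
 *   #include <stdint.h>
 *   int32_t  i32  = -2;                      // 0xfffffffe
 *   uint64_t u64  = i32;                     // 0xfffffffffffffffe
 *   uint64_t u64b = (uint64_t)(int64_t)i32;  // identical result, spelled out
 */
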
9475
9476/**
9477 * Fetches a data qword.
9478 *
9479 * @returns Strict VBox status code.
9480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9481 * @param pu64Dst Where to return the qword.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 */
9486IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9487{
9488 /* The lazy approach for now... */
9489 uint64_t const *pu64Src;
9490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9491 if (rc == VINF_SUCCESS)
9492 {
9493 *pu64Dst = *pu64Src;
9494 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9495 }
9496 return rc;
9497}
9498
9499
9500#ifdef IEM_WITH_SETJMP
9501/**
9502 * Fetches a data qword, longjmp on error.
9503 *
9504 * @returns The qword.
9505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9506 * @param iSegReg The index of the segment register to use for
9507 * this access. The base and limits are checked.
9508 * @param GCPtrMem The address of the guest memory.
9509 */
9510DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9511{
9512 /* The lazy approach for now... */
9513 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9514 uint64_t const u64Ret = *pu64Src;
9515 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9516 return u64Ret;
9517}
9518#endif
9519
9520
9521/**
9522 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9523 *
9524 * @returns Strict VBox status code.
9525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9526 * @param pu64Dst Where to return the qword.
9527 * @param iSegReg The index of the segment register to use for
9528 * this access. The base and limits are checked.
9529 * @param GCPtrMem The address of the guest memory.
9530 */
9531IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9532{
9533 /* The lazy approach for now... */
9534 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9535 if (RT_UNLIKELY(GCPtrMem & 15))
9536 return iemRaiseGeneralProtectionFault0(pVCpu);
9537
9538 uint64_t const *pu64Src;
9539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9540 if (rc == VINF_SUCCESS)
9541 {
9542 *pu64Dst = *pu64Src;
9543 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9544 }
9545 return rc;
9546}
9547
9548
9549#ifdef IEM_WITH_SETJMP
9550/**
9551 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9552 *
9553 * @returns The qword.
9554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9555 * @param iSegReg The index of the segment register to use for
9556 * this access. The base and limits are checked.
9557 * @param GCPtrMem The address of the guest memory.
9558 */
9559DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9560{
9561 /* The lazy approach for now... */
9562 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9563 if (RT_LIKELY(!(GCPtrMem & 15)))
9564 {
9565 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9566 uint64_t const u64Ret = *pu64Src;
9567 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9568 return u64Ret;
9569 }
9570
9571 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9572 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9573}
9574#endif
9575
9576
9577/**
9578 * Fetches a data tword.
9579 *
9580 * @returns Strict VBox status code.
9581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9582 * @param pr80Dst Where to return the tword.
9583 * @param iSegReg The index of the segment register to use for
9584 * this access. The base and limits are checked.
9585 * @param GCPtrMem The address of the guest memory.
9586 */
9587IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9588{
9589 /* The lazy approach for now... */
9590 PCRTFLOAT80U pr80Src;
9591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9592 if (rc == VINF_SUCCESS)
9593 {
9594 *pr80Dst = *pr80Src;
9595 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9596 }
9597 return rc;
9598}
9599
9600
9601#ifdef IEM_WITH_SETJMP
9602/**
9603 * Fetches a data tword, longjmp on error.
9604 *
9605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9606 * @param pr80Dst Where to return the tword.
9607 * @param iSegReg The index of the segment register to use for
9608 * this access. The base and limits are checked.
9609 * @param GCPtrMem The address of the guest memory.
9610 */
9611DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9612{
9613 /* The lazy approach for now... */
9614 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9615 *pr80Dst = *pr80Src;
9616 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9617}
9618#endif
9619
9620
9621/**
9622 * Fetches a data dqword (double qword), generally SSE related.
9623 *
9624 * @returns Strict VBox status code.
9625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9626 * @param pu128Dst Where to return the dqword.
9627 * @param iSegReg The index of the segment register to use for
9628 * this access. The base and limits are checked.
9629 * @param GCPtrMem The address of the guest memory.
9630 */
9631IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9632{
9633 /* The lazy approach for now... */
9634 PCRTUINT128U pu128Src;
9635 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9636 if (rc == VINF_SUCCESS)
9637 {
9638 pu128Dst->au64[0] = pu128Src->au64[0];
9639 pu128Dst->au64[1] = pu128Src->au64[1];
9640 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9641 }
9642 return rc;
9643}
9644
9645
9646#ifdef IEM_WITH_SETJMP
9647/**
9648 * Fetches a data dqword (double qword), generally SSE related.
9649 *
9650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9651 * @param pu128Dst Where to return the dqword.
9652 * @param iSegReg The index of the segment register to use for
9653 * this access. The base and limits are checked.
9654 * @param GCPtrMem The address of the guest memory.
9655 */
9656IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9657{
9658 /* The lazy approach for now... */
9659 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9660 pu128Dst->au64[0] = pu128Src->au64[0];
9661 pu128Dst->au64[1] = pu128Src->au64[1];
9662 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9663}
9664#endif
9665
9666
9667/**
9668 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9669 * related.
9670 *
9671 * Raises \#GP(0) if not aligned.
9672 *
9673 * @returns Strict VBox status code.
9674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9675 * @param pu128Dst Where to return the dqword.
9676 * @param iSegReg The index of the segment register to use for
9677 * this access. The base and limits are checked.
9678 * @param GCPtrMem The address of the guest memory.
9679 */
9680IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9681{
9682 /* The lazy approach for now... */
9683 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9684 if ( (GCPtrMem & 15)
9685 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9686 return iemRaiseGeneralProtectionFault0(pVCpu);
9687
9688 PCRTUINT128U pu128Src;
9689 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9690 if (rc == VINF_SUCCESS)
9691 {
9692 pu128Dst->au64[0] = pu128Src->au64[0];
9693 pu128Dst->au64[1] = pu128Src->au64[1];
9694 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9695 }
9696 return rc;
9697}
9698
9699
9700#ifdef IEM_WITH_SETJMP
9701/**
9702 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9703 * related, longjmp on error.
9704 *
9705 * Raises \#GP(0) if not aligned.
9706 *
9707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9708 * @param pu128Dst Where to return the dqword.
9709 * @param iSegReg The index of the segment register to use for
9710 * this access. The base and limits are checked.
9711 * @param GCPtrMem The address of the guest memory.
9712 */
9713DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9714{
9715 /* The lazy approach for now... */
9716 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9717 if ( (GCPtrMem & 15) == 0
9718 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9719 {
9720 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9721 pu128Dst->au64[0] = pu128Src->au64[0];
9722 pu128Dst->au64[1] = pu128Src->au64[1];
9723 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9724 return;
9725 }
9726
9727 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9728 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9729}
9730#endif
9731
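/*
 * The aligned SSE fetch paths above only raise #GP(0) for a misaligned
 * 16-byte access when MXCSR.MM is clear; with MXCSR.MM set (AMD's
 * MISALIGNSSE feature) the misaligned access is tolerated.  A compact sketch
 * of that gate (hypothetical helper, not IEM code; MXCSR.MM is assumed to be
 * bit 17):
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool sseAccessAllowed(uint64_t GCPtrMem, uint32_t fMxcsr)
 *   {
 *       uint32_t const fMxcsrMm = UINT32_C(1) << 17;   // MXCSR.MM (MISALIGNSSE)
 *       return (GCPtrMem & 15) == 0 || (fMxcsr & fMxcsrMm) != 0;
 *   }
 */
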
9732
9733/**
9734 * Fetches a data oword (octo word), generally AVX related.
9735 *
9736 * @returns Strict VBox status code.
9737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9738 * @param pu256Dst Where to return the 256-bit value.
9739 * @param iSegReg The index of the segment register to use for
9740 * this access. The base and limits are checked.
9741 * @param GCPtrMem The address of the guest memory.
9742 */
9743IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9744{
9745 /* The lazy approach for now... */
9746 PCRTUINT256U pu256Src;
9747 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9748 if (rc == VINF_SUCCESS)
9749 {
9750 pu256Dst->au64[0] = pu256Src->au64[0];
9751 pu256Dst->au64[1] = pu256Src->au64[1];
9752 pu256Dst->au64[2] = pu256Src->au64[2];
9753 pu256Dst->au64[3] = pu256Src->au64[3];
9754 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9755 }
9756 return rc;
9757}
9758
9759
9760#ifdef IEM_WITH_SETJMP
9761/**
9762 * Fetches a data oword (octo word), generally AVX related.
9763 *
9764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9765 * @param pu256Dst Where to return the 256-bit value.
9766 * @param iSegReg The index of the segment register to use for
9767 * this access. The base and limits are checked.
9768 * @param GCPtrMem The address of the guest memory.
9769 */
9770IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9771{
9772 /* The lazy approach for now... */
9773 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9774 pu256Dst->au64[0] = pu256Src->au64[0];
9775 pu256Dst->au64[1] = pu256Src->au64[1];
9776 pu256Dst->au64[2] = pu256Src->au64[2];
9777 pu256Dst->au64[3] = pu256Src->au64[3];
9778 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9779}
9780#endif
9781
9782
9783/**
9784 * Fetches a data oword (octo word) at an aligned address, generally AVX
9785 * related.
9786 *
9787 * Raises \#GP(0) if not aligned.
9788 *
9789 * @returns Strict VBox status code.
9790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9791 * @param pu256Dst Where to return the 256-bit value.
9792 * @param iSegReg The index of the segment register to use for
9793 * this access. The base and limits are checked.
9794 * @param GCPtrMem The address of the guest memory.
9795 */
9796IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9797{
9798 /* The lazy approach for now... */
9799 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9800 if (GCPtrMem & 31)
9801 return iemRaiseGeneralProtectionFault0(pVCpu);
9802
9803 PCRTUINT256U pu256Src;
9804 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9805 if (rc == VINF_SUCCESS)
9806 {
9807 pu256Dst->au64[0] = pu256Src->au64[0];
9808 pu256Dst->au64[1] = pu256Src->au64[1];
9809 pu256Dst->au64[2] = pu256Src->au64[2];
9810 pu256Dst->au64[3] = pu256Src->au64[3];
9811 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9812 }
9813 return rc;
9814}
9815
9816
9817#ifdef IEM_WITH_SETJMP
9818/**
9819 * Fetches a data oword (octo word) at an aligned address, generally AVX
9820 * related, longjmp on error.
9821 *
9822 * Raises \#GP(0) if not aligned.
9823 *
9824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9825 * @param pu256Dst Where to return the 256-bit value.
9826 * @param iSegReg The index of the segment register to use for
9827 * this access. The base and limits are checked.
9828 * @param GCPtrMem The address of the guest memory.
9829 */
9830DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9831{
9832 /* The lazy approach for now... */
9833 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9834 if ((GCPtrMem & 31) == 0)
9835 {
9836 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9837 pu256Dst->au64[0] = pu256Src->au64[0];
9838 pu256Dst->au64[1] = pu256Src->au64[1];
9839 pu256Dst->au64[2] = pu256Src->au64[2];
9840 pu256Dst->au64[3] = pu256Src->au64[3];
9841 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9842 return;
9843 }
9844
9845 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9846 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9847}
9848#endif
9849
9850
9851
9852/**
9853 * Fetches a descriptor register (lgdt, lidt).
9854 *
9855 * @returns Strict VBox status code.
9856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9857 * @param pcbLimit Where to return the limit.
9858 * @param pGCPtrBase Where to return the base.
9859 * @param iSegReg The index of the segment register to use for
9860 * this access. The base and limits are checked.
9861 * @param GCPtrMem The address of the guest memory.
9862 * @param enmOpSize The effective operand size.
9863 */
9864IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9865 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9866{
9867 /*
9868 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9869 * little special:
9870 * - The two reads are done separately.
9871 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9872 * - We suspect the 386 to actually commit the limit before the base in
9873 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9874 * don't try to emulate this eccentric behavior, because it's not well
9875 * enough understood and rather hard to trigger.
9876 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9877 */
9878 VBOXSTRICTRC rcStrict;
9879 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9880 {
9881 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9882 if (rcStrict == VINF_SUCCESS)
9883 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9884 }
9885 else
9886 {
9887 uint32_t uTmp = 0; /* (silences Visual C++'s maybe-used-uninitialized warning) */
9888 if (enmOpSize == IEMMODE_32BIT)
9889 {
9890 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9891 {
9892 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9893 if (rcStrict == VINF_SUCCESS)
9894 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9895 }
9896 else
9897 {
9898 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9899 if (rcStrict == VINF_SUCCESS)
9900 {
9901 *pcbLimit = (uint16_t)uTmp;
9902 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9903 }
9904 }
9905 if (rcStrict == VINF_SUCCESS)
9906 *pGCPtrBase = uTmp;
9907 }
9908 else
9909 {
9910 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9911 if (rcStrict == VINF_SUCCESS)
9912 {
9913 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9914 if (rcStrict == VINF_SUCCESS)
9915 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9916 }
9917 }
9918 }
9919 return rcStrict;
9920}
9921
9922
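/*
 * For reference, the memory operand read above is the usual pseudo-descriptor:
 * a 16-bit limit immediately followed by the base.  Illustrative packed
 * layouts (hypothetical types, not IEM structures):
 *
 *   #include <stdint.h>
 *   #pragma pack(1)
 *   typedef struct { uint16_t cbLimit; uint32_t uBase; } PSEUDODESC32; // 16/32-bit code; 16-bit
 *                                                                      // operand size uses only
 *                                                                      // the low 24 base bits
 *   typedef struct { uint16_t cbLimit; uint64_t uBase; } PSEUDODESC64; // 64-bit mode
 *   #pragma pack()
 */
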
9923
9924/**
9925 * Stores a data byte.
9926 *
9927 * @returns Strict VBox status code.
9928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9929 * @param iSegReg The index of the segment register to use for
9930 * this access. The base and limits are checked.
9931 * @param GCPtrMem The address of the guest memory.
9932 * @param u8Value The value to store.
9933 */
9934IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9935{
9936 /* The lazy approach for now... */
9937 uint8_t *pu8Dst;
9938 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9939 if (rc == VINF_SUCCESS)
9940 {
9941 *pu8Dst = u8Value;
9942 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9943 }
9944 return rc;
9945}
9946
9947
9948#ifdef IEM_WITH_SETJMP
9949/**
9950 * Stores a data byte, longjmp on error.
9951 *
9952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9953 * @param iSegReg The index of the segment register to use for
9954 * this access. The base and limits are checked.
9955 * @param GCPtrMem The address of the guest memory.
9956 * @param u8Value The value to store.
9957 */
9958IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9959{
9960 /* The lazy approach for now... */
9961 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9962 *pu8Dst = u8Value;
9963 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9964}
9965#endif
9966
9967
9968/**
9969 * Stores a data word.
9970 *
9971 * @returns Strict VBox status code.
9972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9973 * @param iSegReg The index of the segment register to use for
9974 * this access. The base and limits are checked.
9975 * @param GCPtrMem The address of the guest memory.
9976 * @param u16Value The value to store.
9977 */
9978IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9979{
9980 /* The lazy approach for now... */
9981 uint16_t *pu16Dst;
9982 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9983 if (rc == VINF_SUCCESS)
9984 {
9985 *pu16Dst = u16Value;
9986 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9987 }
9988 return rc;
9989}
9990
9991
9992#ifdef IEM_WITH_SETJMP
9993/**
9994 * Stores a data word, longjmp on error.
9995 *
9996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9997 * @param iSegReg The index of the segment register to use for
9998 * this access. The base and limits are checked.
9999 * @param GCPtrMem The address of the guest memory.
10000 * @param u16Value The value to store.
10001 */
10002IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10003{
10004 /* The lazy approach for now... */
10005 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10006 *pu16Dst = u16Value;
10007 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10008}
10009#endif
10010
10011
10012/**
10013 * Stores a data dword.
10014 *
10015 * @returns Strict VBox status code.
10016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10017 * @param iSegReg The index of the segment register to use for
10018 * this access. The base and limits are checked.
10019 * @param GCPtrMem The address of the guest memory.
10020 * @param u32Value The value to store.
10021 */
10022IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10023{
10024 /* The lazy approach for now... */
10025 uint32_t *pu32Dst;
10026 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10027 if (rc == VINF_SUCCESS)
10028 {
10029 *pu32Dst = u32Value;
10030 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10031 }
10032 return rc;
10033}
10034
10035
10036#ifdef IEM_WITH_SETJMP
10037/**
10038 * Stores a data dword, longjmp on error.
10039 *
10041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10042 * @param iSegReg The index of the segment register to use for
10043 * this access. The base and limits are checked.
10044 * @param GCPtrMem The address of the guest memory.
10045 * @param u32Value The value to store.
10046 */
10047IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10048{
10049 /* The lazy approach for now... */
10050 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10051 *pu32Dst = u32Value;
10052 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10053}
10054#endif
10055
10056
10057/**
10058 * Stores a data qword.
10059 *
10060 * @returns Strict VBox status code.
10061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10062 * @param iSegReg The index of the segment register to use for
10063 * this access. The base and limits are checked.
10064 * @param GCPtrMem The address of the guest memory.
10065 * @param u64Value The value to store.
10066 */
10067IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10068{
10069 /* The lazy approach for now... */
10070 uint64_t *pu64Dst;
10071 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10072 if (rc == VINF_SUCCESS)
10073 {
10074 *pu64Dst = u64Value;
10075 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10076 }
10077 return rc;
10078}
10079
10080
10081#ifdef IEM_WITH_SETJMP
10082/**
10083 * Stores a data qword, longjmp on error.
10084 *
10085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10086 * @param iSegReg The index of the segment register to use for
10087 * this access. The base and limits are checked.
10088 * @param GCPtrMem The address of the guest memory.
10089 * @param u64Value The value to store.
10090 */
10091IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10092{
10093 /* The lazy approach for now... */
10094 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10095 *pu64Dst = u64Value;
10096 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10097}
10098#endif
10099
10100
10101/**
10102 * Stores a data dqword.
10103 *
10104 * @returns Strict VBox status code.
10105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10106 * @param iSegReg The index of the segment register to use for
10107 * this access. The base and limits are checked.
10108 * @param GCPtrMem The address of the guest memory.
10109 * @param u128Value The value to store.
10110 */
10111IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10112{
10113 /* The lazy approach for now... */
10114 PRTUINT128U pu128Dst;
10115 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10116 if (rc == VINF_SUCCESS)
10117 {
10118 pu128Dst->au64[0] = u128Value.au64[0];
10119 pu128Dst->au64[1] = u128Value.au64[1];
10120 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10121 }
10122 return rc;
10123}
10124
10125
10126#ifdef IEM_WITH_SETJMP
10127/**
10128 * Stores a data dqword, longjmp on error.
10129 *
10130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10131 * @param iSegReg The index of the segment register to use for
10132 * this access. The base and limits are checked.
10133 * @param GCPtrMem The address of the guest memory.
10134 * @param u128Value The value to store.
10135 */
10136IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10137{
10138 /* The lazy approach for now... */
10139 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10140 pu128Dst->au64[0] = u128Value.au64[0];
10141 pu128Dst->au64[1] = u128Value.au64[1];
10142 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10143}
10144#endif
10145
10146
10147/**
10148 * Stores a data dqword, SSE aligned.
10149 *
10150 * @returns Strict VBox status code.
10151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10152 * @param iSegReg The index of the segment register to use for
10153 * this access. The base and limits are checked.
10154 * @param GCPtrMem The address of the guest memory.
10155 * @param u128Value The value to store.
10156 */
10157IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10158{
10159 /* The lazy approach for now... */
10160 if ( (GCPtrMem & 15)
10161 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10162 return iemRaiseGeneralProtectionFault0(pVCpu);
10163
10164 PRTUINT128U pu128Dst;
10165 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10166 if (rc == VINF_SUCCESS)
10167 {
10168 pu128Dst->au64[0] = u128Value.au64[0];
10169 pu128Dst->au64[1] = u128Value.au64[1];
10170 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10171 }
10172 return rc;
10173}
10174
10175
10176#ifdef IEM_WITH_SETJMP
10177/**
10178 * Stores a data dqword, SSE aligned, longjmp on error.
10179 *
10181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10182 * @param iSegReg The index of the segment register to use for
10183 * this access. The base and limits are checked.
10184 * @param GCPtrMem The address of the guest memory.
10185 * @param u128Value The value to store.
10186 */
10187DECL_NO_INLINE(IEM_STATIC, void)
10188iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10189{
10190 /* The lazy approach for now... */
10191 if ( (GCPtrMem & 15) == 0
10192 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10193 {
10194 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10195 pu128Dst->au64[0] = u128Value.au64[0];
10196 pu128Dst->au64[1] = u128Value.au64[1];
10197 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10198 return;
10199 }
10200
10201 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10203}
10204#endif
10205
10206
10207/**
10208 * Stores a 256-bit data value.
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param iSegReg The index of the segment register to use for
10213 * this access. The base and limits are checked.
10214 * @param GCPtrMem The address of the guest memory.
10215 * @param pu256Value Pointer to the value to store.
10216 */
10217IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10218{
10219 /* The lazy approach for now... */
10220 PRTUINT256U pu256Dst;
10221 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10222 if (rc == VINF_SUCCESS)
10223 {
10224 pu256Dst->au64[0] = pu256Value->au64[0];
10225 pu256Dst->au64[1] = pu256Value->au64[1];
10226 pu256Dst->au64[2] = pu256Value->au64[2];
10227 pu256Dst->au64[3] = pu256Value->au64[3];
10228 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10229 }
10230 return rc;
10231}
10232
10233
10234#ifdef IEM_WITH_SETJMP
10235/**
10236 * Stores a 256-bit data value, longjmp on error.
10237 *
10238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10239 * @param iSegReg The index of the segment register to use for
10240 * this access. The base and limits are checked.
10241 * @param GCPtrMem The address of the guest memory.
10242 * @param pu256Value Pointer to the value to store.
10243 */
10244IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10245{
10246 /* The lazy approach for now... */
10247 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10248 pu256Dst->au64[0] = pu256Value->au64[0];
10249 pu256Dst->au64[1] = pu256Value->au64[1];
10250 pu256Dst->au64[2] = pu256Value->au64[2];
10251 pu256Dst->au64[3] = pu256Value->au64[3];
10252 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10253}
10254#endif
10255
10256
10257/**
10258 * Stores a 256-bit data value, AVX aligned.
10259 *
10260 * @returns Strict VBox status code.
10261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10262 * @param iSegReg The index of the segment register to use for
10263 * this access. The base and limits are checked.
10264 * @param GCPtrMem The address of the guest memory.
10265 * @param pu256Value Pointer to the value to store.
10266 */
10267IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10268{
10269 /* The lazy approach for now... */
10270 if (GCPtrMem & 31)
10271 return iemRaiseGeneralProtectionFault0(pVCpu);
10272
10273 PRTUINT256U pu256Dst;
10274 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10275 if (rc == VINF_SUCCESS)
10276 {
10277 pu256Dst->au64[0] = pu256Value->au64[0];
10278 pu256Dst->au64[1] = pu256Value->au64[1];
10279 pu256Dst->au64[2] = pu256Value->au64[2];
10280 pu256Dst->au64[3] = pu256Value->au64[3];
10281 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10282 }
10283 return rc;
10284}
10285
10286
10287#ifdef IEM_WITH_SETJMP
10288/**
10289 * Stores a 256-bit data value, AVX aligned, longjmp on error.
10290 *
10292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10293 * @param iSegReg The index of the segment register to use for
10294 * this access. The base and limits are checked.
10295 * @param GCPtrMem The address of the guest memory.
10296 * @param pu256Value Pointer to the value to store.
10297 */
10298DECL_NO_INLINE(IEM_STATIC, void)
10299iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10300{
10301 /* The lazy approach for now... */
10302 if ((GCPtrMem & 31) == 0)
10303 {
10304 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10305 pu256Dst->au64[0] = pu256Value->au64[0];
10306 pu256Dst->au64[1] = pu256Value->au64[1];
10307 pu256Dst->au64[2] = pu256Value->au64[2];
10308 pu256Dst->au64[3] = pu256Value->au64[3];
10309 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10310 return;
10311 }
10312
10313 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10314 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10315}
10316#endif
10317
10318
10319/**
10320 * Stores a descriptor register (sgdt, sidt).
10321 *
10322 * @returns Strict VBox status code.
10323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10324 * @param cbLimit The limit.
10325 * @param GCPtrBase The base address.
10326 * @param iSegReg The index of the segment register to use for
10327 * this access. The base and limits are checked.
10328 * @param GCPtrMem The address of the guest memory.
10329 */
10330IEM_STATIC VBOXSTRICTRC
10331iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10332{
10333 /*
10334 * The SIDT and SGDT instructions actually store the data using two
10335 * independent writes; the instructions do not respond to opsize prefixes. (The resulting byte layout is sketched after this function.)
10336 */
10337 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10338 if (rcStrict == VINF_SUCCESS)
10339 {
10340 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10341 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10342 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10343 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10344 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10345 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10346 else
10347 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10348 }
10349 return rcStrict;
10350}
10351
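/*
 * Layout sketch of what the function above emits (illustrative pseudo-code,
 * not IEM code); since the two stores are independent, a fault can hit
 * between them:
 *
 *   write_u16(GCPtrMem + 0, cbLimit);                          // limit, always 16 bits
 *   if (16-bit code on a 286-class target CPU)
 *       write_u32(GCPtrMem + 2, (uint32_t)GCPtrBase | 0xff000000);
 *   else if (16-bit or 32-bit code)
 *       write_u32(GCPtrMem + 2, (uint32_t)GCPtrBase);          // 32-bit base
 *   else
 *       write_u64(GCPtrMem + 2, GCPtrBase);                    // 64-bit base (long mode)
 */
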
10352
10353/**
10354 * Pushes a word onto the stack.
10355 *
10356 * @returns Strict VBox status code.
10357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10358 * @param u16Value The value to push.
10359 */
10360IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10361{
10362 /* Decrement the stack pointer. */
10363 uint64_t uNewRsp;
10364 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10365
10366 /* Write the word the lazy way. */
10367 uint16_t *pu16Dst;
10368 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10369 if (rc == VINF_SUCCESS)
10370 {
10371 *pu16Dst = u16Value;
10372 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10373 }
10374
10375 /* Commit the new RSP value unless an access handler made trouble. */
10376 if (rc == VINF_SUCCESS)
10377 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10378
10379 return rc;
10380}
10381
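/*
 * The stack push helpers above and below all follow the same pattern: compute
 * the new top of stack via iemRegGetRspForPush (the stack grows down), write
 * the value there, and only commit RSP once the write has succeeded.  A
 * minimal model of the pointer arithmetic (illustrative, assuming a flat
 * 64-bit stack; the real helper also handles 16-bit and 32-bit stack pointers
 * via the SS attributes):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t pushTop(uint64_t uRsp, uint8_t cbValue, uint64_t *puNewRsp)
 *   {
 *       *puNewRsp = uRsp - cbValue;   // stack grows down by the operand size
 *       return *puNewRsp;             // the value is written at the new top
 *   }
 */
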
10382
10383/**
10384 * Pushes a dword onto the stack.
10385 *
10386 * @returns Strict VBox status code.
10387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10388 * @param u32Value The value to push.
10389 */
10390IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10391{
10392 /* Decrement the stack pointer. */
10393 uint64_t uNewRsp;
10394 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10395
10396 /* Write the dword the lazy way. */
10397 uint32_t *pu32Dst;
10398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu32Dst = u32Value;
10402 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10403 }
10404
10405 /* Commit the new RSP value unless an access handler made trouble. */
10406 if (rc == VINF_SUCCESS)
10407 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10408
10409 return rc;
10410}
10411
10412
10413/**
10414 * Pushes a dword segment register value onto the stack.
10415 *
10416 * @returns Strict VBox status code.
10417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10418 * @param u32Value The value to push.
10419 */
10420IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10421{
10422 /* Decrement the stack pointer. */
10423 uint64_t uNewRsp;
10424 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10425
10426 /* The Intel docs talk about zero extending the selector register
10427 value. My actual Intel CPU here might be zero extending the value,
10428 but it still only writes the lower word (see the sketch after this function)... */
10429 /** @todo Test this on new HW, on AMD, and in 64-bit mode. Also test what
10430 * happens when crossing a page boundary: is the high word checked
10431 * for write accessibility or not? Probably it is. What about segment limits?
10432 * It appears this behavior is also shared with trap error codes.
10433 *
10434 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro.
10435 * Check ancient hardware to see when it actually did change. */
10436 uint16_t *pu16Dst;
10437 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10438 if (rc == VINF_SUCCESS)
10439 {
10440 *pu16Dst = (uint16_t)u32Value;
10441 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10442 }
10443
10444 /* Commit the new RSP value unless an access handler made trouble. */
10445 if (rc == VINF_SUCCESS)
10446 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10447
10448 return rc;
10449}
10450
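/*
 * If the behaviour described in the comment above holds, a 32-bit PUSH of a
 * segment register moves the stack pointer by four but only rewrites the low
 * word of the slot, leaving the upper word as it was.  Hypothetical check
 * (illustrative only, not a real testcase):
 *
 *   uint32_t u32Slot = UINT32_C(0xdeadbeef);  // memory that becomes the stack slot
 *   // ...execute a 32-bit "push es" with the stack pointing at &u32Slot + 4...
 *   // expected: (u32Slot & 0xffff) == ES selector, (u32Slot >> 16) possibly still 0xdead
 */
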
10451
10452/**
10453 * Pushes a qword onto the stack.
10454 *
10455 * @returns Strict VBox status code.
10456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10457 * @param u64Value The value to push.
10458 */
10459IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10460{
10461 /* Decrement the stack pointer. */
10462 uint64_t uNewRsp;
10463 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10464
10465 /* Write the qword the lazy way. */
10466 uint64_t *pu64Dst;
10467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10468 if (rc == VINF_SUCCESS)
10469 {
10470 *pu64Dst = u64Value;
10471 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10472 }
10473
10474 /* Commit the new RSP value unless we an access handler made trouble. */
10475 if (rc == VINF_SUCCESS)
10476 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10477
10478 return rc;
10479}
10480
10481
10482/**
10483 * Pops a word from the stack.
10484 *
10485 * @returns Strict VBox status code.
10486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10487 * @param pu16Value Where to store the popped value.
10488 */
10489IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10490{
10491 /* Increment the stack pointer. */
10492 uint64_t uNewRsp;
10493 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10494
10495 /* Read the word the lazy way. */
10496 uint16_t const *pu16Src;
10497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10498 if (rc == VINF_SUCCESS)
10499 {
10500 *pu16Value = *pu16Src;
10501 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10502
10503 /* Commit the new RSP value. */
10504 if (rc == VINF_SUCCESS)
10505 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10506 }
10507
10508 return rc;
10509}
10510
10511
10512/**
10513 * Pops a dword from the stack.
10514 *
10515 * @returns Strict VBox status code.
10516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10517 * @param pu32Value Where to store the popped value.
10518 */
10519IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10520{
10521 /* Increment the stack pointer. */
10522 uint64_t uNewRsp;
10523 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10524
10525 /* Read the dword the lazy way. */
10526 uint32_t const *pu32Src;
10527 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10528 if (rc == VINF_SUCCESS)
10529 {
10530 *pu32Value = *pu32Src;
10531 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10532
10533 /* Commit the new RSP value. */
10534 if (rc == VINF_SUCCESS)
10535 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10536 }
10537
10538 return rc;
10539}
10540
10541
10542/**
10543 * Pops a qword from the stack.
10544 *
10545 * @returns Strict VBox status code.
10546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10547 * @param pu64Value Where to store the popped value.
10548 */
10549IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10550{
10551 /* Increment the stack pointer. */
10552 uint64_t uNewRsp;
10553 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10554
10555 /* Read the qword the lazy way. */
10556 uint64_t const *pu64Src;
10557 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10558 if (rc == VINF_SUCCESS)
10559 {
10560 *pu64Value = *pu64Src;
10561 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10562
10563 /* Commit the new RSP value. */
10564 if (rc == VINF_SUCCESS)
10565 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10566 }
10567
10568 return rc;
10569}
10570
10571
10572/**
10573 * Pushes a word onto the stack, using a temporary stack pointer.
10574 *
10575 * @returns Strict VBox status code.
10576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10577 * @param u16Value The value to push.
10578 * @param pTmpRsp Pointer to the temporary stack pointer.
10579 */
10580IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10581{
10582 /* Decrement the stack pointer. */
10583 RTUINT64U NewRsp = *pTmpRsp;
10584 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10585
10586 /* Write the word the lazy way. */
10587 uint16_t *pu16Dst;
10588 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10589 if (rc == VINF_SUCCESS)
10590 {
10591 *pu16Dst = u16Value;
10592 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10593 }
10594
10595 /* Commit the new RSP value unless an access handler made trouble. */
10596 if (rc == VINF_SUCCESS)
10597 *pTmpRsp = NewRsp;
10598
10599 return rc;
10600}
10601
10602
10603/**
10604 * Pushes a dword onto the stack, using a temporary stack pointer.
10605 *
10606 * @returns Strict VBox status code.
10607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10608 * @param u32Value The value to push.
10609 * @param pTmpRsp Pointer to the temporary stack pointer.
10610 */
10611IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10612{
10613 /* Decrement the stack pointer. */
10614 RTUINT64U NewRsp = *pTmpRsp;
10615 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10616
10617 /* Write the dword the lazy way. */
10618 uint32_t *pu32Dst;
10619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10620 if (rc == VINF_SUCCESS)
10621 {
10622 *pu32Dst = u32Value;
10623 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10624 }
10625
10626 /* Commit the new RSP value unless an access handler made trouble. */
10627 if (rc == VINF_SUCCESS)
10628 *pTmpRsp = NewRsp;
10629
10630 return rc;
10631}
10632
10633
10634/**
10635 * Pushes a qword onto the stack, using a temporary stack pointer.
10636 *
10637 * @returns Strict VBox status code.
10638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10639 * @param u64Value The value to push.
10640 * @param pTmpRsp Pointer to the temporary stack pointer.
10641 */
10642IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10643{
10644 /* Decrement the stack pointer. */
10645 RTUINT64U NewRsp = *pTmpRsp;
10646 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10647
10648 /* Write the qword the lazy way. */
10649 uint64_t *pu64Dst;
10650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10651 if (rc == VINF_SUCCESS)
10652 {
10653 *pu64Dst = u64Value;
10654 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10655 }
10656
10657 /* Commit the new RSP value unless an access handler made trouble. */
10658 if (rc == VINF_SUCCESS)
10659 *pTmpRsp = NewRsp;
10660
10661 return rc;
10662}
10663
10664
10665/**
10666 * Pops a word from the stack, using a temporary stack pointer.
10667 *
10668 * @returns Strict VBox status code.
10669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10670 * @param pu16Value Where to store the popped value.
10671 * @param pTmpRsp Pointer to the temporary stack pointer.
10672 */
10673IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10674{
10675 /* Increment the stack pointer. */
10676 RTUINT64U NewRsp = *pTmpRsp;
10677 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10678
10679 /* Read the word the lazy way. */
10680 uint16_t const *pu16Src;
10681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10682 if (rc == VINF_SUCCESS)
10683 {
10684 *pu16Value = *pu16Src;
10685 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10686
10687 /* Commit the new RSP value. */
10688 if (rc == VINF_SUCCESS)
10689 *pTmpRsp = NewRsp;
10690 }
10691
10692 return rc;
10693}
10694
10695
10696/**
10697 * Pops a dword from the stack, using a temporary stack pointer.
10698 *
10699 * @returns Strict VBox status code.
10700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10701 * @param pu32Value Where to store the popped value.
10702 * @param pTmpRsp Pointer to the temporary stack pointer.
10703 */
10704IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10705{
10706 /* Increment the stack pointer. */
10707 RTUINT64U NewRsp = *pTmpRsp;
10708 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10709
10710 /* Read the dword the lazy way. */
10711 uint32_t const *pu32Src;
10712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10713 if (rc == VINF_SUCCESS)
10714 {
10715 *pu32Value = *pu32Src;
10716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10717
10718 /* Commit the new RSP value. */
10719 if (rc == VINF_SUCCESS)
10720 *pTmpRsp = NewRsp;
10721 }
10722
10723 return rc;
10724}
10725
10726
10727/**
10728 * Pops a qword from the stack, using a temporary stack pointer.
10729 *
10730 * @returns Strict VBox status code.
10731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10732 * @param pu64Value Where to store the popped value.
10733 * @param pTmpRsp Pointer to the temporary stack pointer.
10734 */
10735IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10736{
10737 /* Increment the stack pointer. */
10738 RTUINT64U NewRsp = *pTmpRsp;
10739 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10740
10741 /* Read the qword the lazy way. */
10742 uint64_t const *pu64Src;
10743 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10744 if (rcStrict == VINF_SUCCESS)
10745 {
10746 *pu64Value = *pu64Src;
10747 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10748
10749 /* Commit the new RSP value. */
10750 if (rcStrict == VINF_SUCCESS)
10751 *pTmpRsp = NewRsp;
10752 }
10753
10754 return rcStrict;
10755}
10756
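/*
 * Usage sketch (hypothetical, for illustration only): the *Ex push/pop helpers
 * above operate on a caller-provided temporary RSP, so a caller can chain
 * several stack accesses and only commit pVCpu->cpum.GstCtx.rsp once everything
 * has succeeded. The helper name iemExamplePopTwoWordsEx is made up for this sketch.
 */
#if 0 /* illustrative sketch, not used anywhere */
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoWordsEx(PVMCPU pVCpu, uint16_t *pu16First, uint16_t *pu16Second)
{
    /* Work on a temporary RSP so a fault in the second pop leaves rSP untouched. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, pu16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, pu16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* Commit only after both pops succeeded. */
    return rcStrict;
}
#endif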
10757
10758/**
10759 * Begin a special stack push (used by interrupts, exceptions and such).
10760 *
10761 * This will raise \#SS or \#PF if appropriate.
10762 *
10763 * @returns Strict VBox status code.
10764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10765 * @param cbMem The number of bytes to push onto the stack.
10766 * @param ppvMem Where to return the pointer to the stack memory.
10767 * As with the other memory functions this could be
10768 * direct access or bounce buffered access, so
10769 * don't commit any registers until the commit call
10770 * succeeds.
10771 * @param puNewRsp Where to return the new RSP value. This must be
10772 * passed unchanged to
10773 * iemMemStackPushCommitSpecial().
10774 */
10775IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10776{
10777 Assert(cbMem < UINT8_MAX);
10778 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10779 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10780}
10781
10782
10783/**
10784 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10785 *
10786 * This will update the rSP.
10787 *
10788 * @returns Strict VBox status code.
10789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10790 * @param pvMem The pointer returned by
10791 * iemMemStackPushBeginSpecial().
10792 * @param uNewRsp The new RSP value returned by
10793 * iemMemStackPushBeginSpecial().
10794 */
10795IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10796{
10797 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10798 if (rcStrict == VINF_SUCCESS)
10799 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10800 return rcStrict;
10801}
10802
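/*
 * Usage sketch (hypothetical, for illustration only): the special push API is a
 * begin/commit pair -- map the stack memory, fill it, then commit the mapping
 * and the new RSP together. The helper name iemExampleSpecialPushU64 is made up
 * for this sketch.
 */
#if 0 /* illustrative sketch, not used anywhere */
IEM_STATIC VBOXSTRICTRC iemExampleSpecialPushU64(PVMCPU pVCpu, uint64_t uValue)
{
    uint64_t    *pu64Dst;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uValue), (void **)&pu64Dst, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu64Dst = uValue;                                                /* Fill the mapped (possibly bounce buffered) memory... */
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu64Dst, uNewRsp); /* ...then unmap and update rSP on success. */
    }
    return rcStrict;
}
#endif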
10803
10804/**
10805 * Begin a special stack pop (used by iret, retf and such).
10806 *
10807 * This will raise \#SS or \#PF if appropriate.
10808 *
10809 * @returns Strict VBox status code.
10810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10811 * @param cbMem The number of bytes to pop from the stack.
10812 * @param ppvMem Where to return the pointer to the stack memory.
10813 * @param puNewRsp Where to return the new RSP value. This must be
10814 * assigned to CPUMCTX::rsp manually some time
10815 * after iemMemStackPopDoneSpecial() has been
10816 * called.
10817 */
10818IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10819{
10820 Assert(cbMem < UINT8_MAX);
10821 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10822 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10823}
10824
10825
10826/**
10827 * Continue a special stack pop (used by iret and retf).
10828 *
10829 * This will raise \#SS or \#PF if appropriate.
10830 *
10831 * @returns Strict VBox status code.
10832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10833 * @param cbMem The number of bytes to pop from the stack.
10834 * @param ppvMem Where to return the pointer to the stack memory.
10835 * @param puNewRsp Where to return the new RSP value. This must be
10836 * assigned to CPUMCTX::rsp manually some time
10837 * after iemMemStackPopDoneSpecial() has been
10838 * called.
10839 */
10840IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10841{
10842 Assert(cbMem < UINT8_MAX);
10843 RTUINT64U NewRsp;
10844 NewRsp.u = *puNewRsp;
10845 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10846 *puNewRsp = NewRsp.u;
10847 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10848}
10849
10850
10851/**
10852 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10853 * iemMemStackPopContinueSpecial).
10854 *
10855 * The caller will manually commit the rSP.
10856 *
10857 * @returns Strict VBox status code.
10858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10859 * @param pvMem The pointer returned by
10860 * iemMemStackPopBeginSpecial() or
10861 * iemMemStackPopContinueSpecial().
10862 */
10863IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10864{
10865 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10866}
10867
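/*
 * Usage sketch (hypothetical, for illustration only): the special pop API is
 * begin -> read the mapped memory -> done, with rSP committed manually by the
 * caller afterwards, as the comments above note. The helper name
 * iemExampleSpecialPopU64 is made up for this sketch.
 */
#if 0 /* illustrative sketch, not used anywhere */
IEM_STATIC VBOXSTRICTRC iemExampleSpecialPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
{
    uint64_t const *pu64Src;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*pu64Value), (void const **)&pu64Src, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu64Value = *pu64Src;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Src);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* The caller commits rSP; the done call only unmaps. */
    }
    return rcStrict;
}
#endif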
10868
10869/**
10870 * Fetches a system table byte.
10871 *
10872 * @returns Strict VBox status code.
10873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10874 * @param pbDst Where to return the byte.
10875 * @param iSegReg The index of the segment register to use for
10876 * this access. The base and limits are checked.
10877 * @param GCPtrMem The address of the guest memory.
10878 */
10879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10880{
10881 /* The lazy approach for now... */
10882 uint8_t const *pbSrc;
10883 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10884 if (rc == VINF_SUCCESS)
10885 {
10886 *pbDst = *pbSrc;
10887 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10888 }
10889 return rc;
10890}
10891
10892
10893/**
10894 * Fetches a system table word.
10895 *
10896 * @returns Strict VBox status code.
10897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10898 * @param pu16Dst Where to return the word.
10899 * @param iSegReg The index of the segment register to use for
10900 * this access. The base and limits are checked.
10901 * @param GCPtrMem The address of the guest memory.
10902 */
10903IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10904{
10905 /* The lazy approach for now... */
10906 uint16_t const *pu16Src;
10907 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10908 if (rc == VINF_SUCCESS)
10909 {
10910 *pu16Dst = *pu16Src;
10911 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10912 }
10913 return rc;
10914}
10915
10916
10917/**
10918 * Fetches a system table dword.
10919 *
10920 * @returns Strict VBox status code.
10921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10922 * @param pu32Dst Where to return the dword.
10923 * @param iSegReg The index of the segment register to use for
10924 * this access. The base and limits are checked.
10925 * @param GCPtrMem The address of the guest memory.
10926 */
10927IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10928{
10929 /* The lazy approach for now... */
10930 uint32_t const *pu32Src;
10931 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10932 if (rc == VINF_SUCCESS)
10933 {
10934 *pu32Dst = *pu32Src;
10935 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10936 }
10937 return rc;
10938}
10939
10940
10941/**
10942 * Fetches a system table qword.
10943 *
10944 * @returns Strict VBox status code.
10945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10946 * @param pu64Dst Where to return the qword.
10947 * @param iSegReg The index of the segment register to use for
10948 * this access. The base and limits are checked.
10949 * @param GCPtrMem The address of the guest memory.
10950 */
10951IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10952{
10953 /* The lazy approach for now... */
10954 uint64_t const *pu64Src;
10955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10956 if (rc == VINF_SUCCESS)
10957 {
10958 *pu64Dst = *pu64Src;
10959 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10960 }
10961 return rc;
10962}
10963
10964
10965/**
10966 * Fetches a descriptor table entry with caller specified error code.
10967 *
10968 * @returns Strict VBox status code.
10969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10970 * @param pDesc Where to return the descriptor table entry.
10971 * @param uSel The selector which table entry to fetch.
10972 * @param uXcpt The exception to raise on table lookup error.
10973 * @param uErrorCode The error code associated with the exception.
10974 */
10975IEM_STATIC VBOXSTRICTRC
10976iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10977{
10978 AssertPtr(pDesc);
10979 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10980
10981 /** @todo did the 286 require all 8 bytes to be accessible? */
10982 /*
10983 * Get the selector table base and check bounds.
10984 */
10985 RTGCPTR GCPtrBase;
10986 if (uSel & X86_SEL_LDT)
10987 {
10988 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10989 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10990 {
10991 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10992 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10993 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10994 uErrorCode, 0);
10995 }
10996
10997 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10998 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10999 }
11000 else
11001 {
11002 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11003 {
11004 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11005 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11006 uErrorCode, 0);
11007 }
11008 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11009 }
11010
11011 /*
11012 * Read the legacy descriptor and maybe the long mode extensions if
11013 * required.
11014 */
11015 VBOXSTRICTRC rcStrict;
11016 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11017 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11018 else
11019 {
11020 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11021 if (rcStrict == VINF_SUCCESS)
11022 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11023 if (rcStrict == VINF_SUCCESS)
11024 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11025 if (rcStrict == VINF_SUCCESS)
11026 pDesc->Legacy.au16[3] = 0;
11027 else
11028 return rcStrict;
11029 }
11030
11031 if (rcStrict == VINF_SUCCESS)
11032 {
11033 if ( !IEM_IS_LONG_MODE(pVCpu)
11034 || pDesc->Legacy.Gen.u1DescType)
11035 pDesc->Long.au64[1] = 0;
11036 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11037 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11038 else
11039 {
11040 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11041 /** @todo is this the right exception? */
11042 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11043 }
11044 }
11045 return rcStrict;
11046}
11047
11048
11049/**
11050 * Fetches a descriptor table entry.
11051 *
11052 * @returns Strict VBox status code.
11053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11054 * @param pDesc Where to return the descriptor table entry.
11055 * @param uSel The selector which table entry to fetch.
11056 * @param uXcpt The exception to raise on table lookup error.
11057 */
11058IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11059{
11060 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11061}
11062
11063
11064/**
11065 * Fakes a long mode stack selector for SS = 0.
11066 *
11067 * @param pDescSs Where to return the fake stack descriptor.
11068 * @param uDpl The DPL we want.
11069 */
11070IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11071{
11072 pDescSs->Long.au64[0] = 0;
11073 pDescSs->Long.au64[1] = 0;
11074 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11075 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11076 pDescSs->Long.Gen.u2Dpl = uDpl;
11077 pDescSs->Long.Gen.u1Present = 1;
11078 pDescSs->Long.Gen.u1Long = 1;
11079}
11080
11081
11082/**
11083 * Marks the selector descriptor as accessed (only non-system descriptors).
11084 *
11085 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11086 * will therefore skip the limit checks.
11087 *
11088 * @returns Strict VBox status code.
11089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11090 * @param uSel The selector.
11091 */
11092IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11093{
11094 /*
11095 * Get the selector table base and calculate the entry address.
11096 */
11097 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11098 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11099 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11100 GCPtr += uSel & X86_SEL_MASK;
11101
11102 /*
11103 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11104 * ugly stuff to avoid this. This will make sure it's an atomic access
11105 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11106 */
11107 VBOXSTRICTRC rcStrict;
11108 uint32_t volatile *pu32;
11109 if ((GCPtr & 3) == 0)
11110 {
11111 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11112 GCPtr += 2 + 2;
11113 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11114 if (rcStrict != VINF_SUCCESS)
11115 return rcStrict;
11116 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11117 }
11118 else
11119 {
11120 /* The misaligned GDT/LDT case, map the whole thing. */
11121 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11122 if (rcStrict != VINF_SUCCESS)
11123 return rcStrict;
11124 switch ((uintptr_t)pu32 & 3)
11125 {
11126 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11127 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11128 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11129 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11130 }
11131 }
11132
11133 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11134}
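/*
 * Worked example of the bit arithmetic above: the accessed flag is bit 0 of
 * descriptor byte 5, i.e. bit 40 of the 8-byte entry. In the aligned case the
 * dword at offset 4 is mapped (GCPtr += 2 + 2), so the flag ends up as bit
 * 40 - 32 = 8 of that dword. In the misaligned case the whole entry is mapped
 * and the byte pointer is advanced to the next 4-byte boundary, subtracting
 * 8 bits per byte skipped (misalignment 0/1/2/3 -> advance 0/3/2/1 bytes ->
 * bit 40/16/24/32).
 */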
11135
11136/** @} */
11137
11138
11139/*
11140 * Include the C/C++ implementation of the instructions.
11141 */
11142#include "IEMAllCImpl.cpp.h"
11143
11144
11145
11146/** @name "Microcode" macros.
11147 *
11148 * The idea is that we should be able to use the same code both to interpret
11149 * instructions and to recompile them. Thus this obfuscation.
11150 *
11151 * @{
11152 */
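/*
 * Usage sketch (hypothetical, for illustration only): an instruction body
 * written in terms of the IEM_MC_* "microcode" macros defined below, roughly a
 * 16-bit register-to-register move. The real instruction bodies live in the
 * included instruction templates; this only shows the typical
 * BEGIN/LOCAL/FETCH/STORE/ADVANCE/END shape.
 */
#if 0 /* illustrative sketch, not used anywhere */
IEM_STATIC VBOXSTRICTRC iemExampleOp_mov_Gv_Ev_16(PVMCPU pVCpu, uint8_t iGRegDst, uint8_t iGRegSrc)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
    IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif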
11153#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11154#define IEM_MC_END() }
11155#define IEM_MC_PAUSE() do {} while (0)
11156#define IEM_MC_CONTINUE() do {} while (0)
11157
11158/** Internal macro. */
11159#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11160 do \
11161 { \
11162 VBOXSTRICTRC rcStrict2 = a_Expr; \
11163 if (rcStrict2 != VINF_SUCCESS) \
11164 return rcStrict2; \
11165 } while (0)
11166
11167
11168#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11169#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11170#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11171#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11172#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11173#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11174#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11175#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11176#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11177 do { \
11178 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11179 return iemRaiseDeviceNotAvailable(pVCpu); \
11180 } while (0)
11181#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11182 do { \
11183 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11184 return iemRaiseDeviceNotAvailable(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11187 do { \
11188 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11189 return iemRaiseMathFault(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11192 do { \
11193 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11194 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11195 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11196 return iemRaiseUndefinedOpcode(pVCpu); \
11197 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11198 return iemRaiseDeviceNotAvailable(pVCpu); \
11199 } while (0)
11200#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11201 do { \
11202 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11203 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11204 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11205 return iemRaiseUndefinedOpcode(pVCpu); \
11206 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11207 return iemRaiseDeviceNotAvailable(pVCpu); \
11208 } while (0)
11209#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11210 do { \
11211 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11212 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11213 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11214 return iemRaiseUndefinedOpcode(pVCpu); \
11215 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11216 return iemRaiseDeviceNotAvailable(pVCpu); \
11217 } while (0)
11218#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11219 do { \
11220 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11221 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11222 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11223 return iemRaiseUndefinedOpcode(pVCpu); \
11224 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11225 return iemRaiseDeviceNotAvailable(pVCpu); \
11226 } while (0)
11227#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11228 do { \
11229 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11230 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11231 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11232 return iemRaiseUndefinedOpcode(pVCpu); \
11233 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11234 return iemRaiseDeviceNotAvailable(pVCpu); \
11235 } while (0)
11236#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11237 do { \
11238 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11239 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11240 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11241 return iemRaiseUndefinedOpcode(pVCpu); \
11242 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11243 return iemRaiseDeviceNotAvailable(pVCpu); \
11244 } while (0)
11245#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11246 do { \
11247 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11248 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11249 return iemRaiseUndefinedOpcode(pVCpu); \
11250 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11251 return iemRaiseDeviceNotAvailable(pVCpu); \
11252 } while (0)
11253#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11254 do { \
11255 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11256 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11257 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11258 return iemRaiseUndefinedOpcode(pVCpu); \
11259 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11260 return iemRaiseDeviceNotAvailable(pVCpu); \
11261 } while (0)
11262#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11263 do { \
11264 if (pVCpu->iem.s.uCpl != 0) \
11265 return iemRaiseGeneralProtectionFault0(pVCpu); \
11266 } while (0)
11267#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11268 do { \
11269 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11270 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11271 } while (0)
11272#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11273 do { \
11274 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11275 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11276 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11277 return iemRaiseUndefinedOpcode(pVCpu); \
11278 } while (0)
11279#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11280 do { \
11281 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11282 return iemRaiseGeneralProtectionFault0(pVCpu); \
11283 } while (0)
11284
11285
11286#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11287#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11288#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11289#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11290#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11291#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11292#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11293 uint32_t a_Name; \
11294 uint32_t *a_pName = &a_Name
11295#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11296 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11297
11298#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11299#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11300
11301#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11318#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11319 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11320 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11321 } while (0)
11322#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11323 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11324 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11325 } while (0)
11326#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11327 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11328 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11329 } while (0)
11330/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11331#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11332 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11333 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11334 } while (0)
11335#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11336 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11337 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11338 } while (0)
11339/** @note Not for IOPL or IF testing or modification. */
11340#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11341#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11342#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11343#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11344
11345#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11346#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11347#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11348#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11349#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11350#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11351#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11352#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11353#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11354#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11355/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11356#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11357 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11358 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11359 } while (0)
11360#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11361 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11362 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11363 } while (0)
11364#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11365 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11366
11367
11368#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11369#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11370/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11371 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11372#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11373#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11374/** @note Not for IOPL or IF testing or modification. */
11375#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11376
11377#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11378#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11379#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11380 do { \
11381 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11382 *pu32Reg += (a_u32Value); \
11383 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11384 } while (0)
11385#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11386
11387#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11388#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11389#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11390 do { \
11391 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11392 *pu32Reg -= (a_u32Value); \
11393 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11394 } while (0)
11395#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11396#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11397
11398#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11399#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11400#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11401#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11402#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11403#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11404#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11405
11406#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11407#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11408#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11409#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11410
11411#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11412#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11413#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11414
11415#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11416#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11417#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11418
11419#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11420#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11421#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11422
11423#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11424#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11425#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11426
11427#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11428
11429#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11430
11431#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11432#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11433#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11434 do { \
11435 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11436 *pu32Reg &= (a_u32Value); \
11437 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11438 } while (0)
11439#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11440
11441#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11442#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11443#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11444 do { \
11445 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11446 *pu32Reg |= (a_u32Value); \
11447 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11448 } while (0)
11449#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11450
11451
11452/** @note Not for IOPL or IF modification. */
11453#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11454/** @note Not for IOPL or IF modification. */
11455#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11456/** @note Not for IOPL or IF modification. */
11457#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11458
11459#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11460
11461/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11462#define IEM_MC_FPU_TO_MMX_MODE() do { \
11463 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11464 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11465 } while (0)
11466
11467/** Switches the FPU state from MMX mode (marks all tags empty, i.e. abridged FTW=0). */
11468#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11469 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11470 } while (0)
11471
11472#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11473 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11474#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11475 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11476#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11477 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11478 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11479 } while (0)
11480#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11481 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11482 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11483 } while (0)
11484#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11485 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11486#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11487 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11488#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11489 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11490
11491#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11492 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11493 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11494 } while (0)
11495#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11496 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11497#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11498 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11499#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11500 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11501#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11502 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11503 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11504 } while (0)
11505#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11506 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11507#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11508 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11509 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11510 } while (0)
11511#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11512 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11513#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11514 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11515 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11516 } while (0)
11517#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11518 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11519#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11520 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11521#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11522 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11523#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11524 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11525#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11526 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11527 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11528 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11529 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11530 } while (0)
11531
11532#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11533 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11534 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11535 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11536 } while (0)
11537#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11538 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11539 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11540 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11541 } while (0)
11542#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11543 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11547 } while (0)
11548#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11549 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11550 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11551 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11552 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11553 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11554 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11555 } while (0)
11556
11557#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11558#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11559 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11560 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11561 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11569 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11570 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11571 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11573 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11575 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11576 } while (0)
11577#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11578 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11579 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11580 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11581 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11584 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11585 } while (0)
11586#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11587 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11588 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11589 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11590 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11591 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11592 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11593 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11594 } while (0)
11595
11596#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11597 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11598#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11599 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11600#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11601 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11602#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11603 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11604 uintptr_t const iYRegTmp = (a_iYReg); \
11605 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11606 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11607 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11608 } while (0)
11609
11610#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11611 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11612 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11613 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11614 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11615 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11616 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11617 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11618 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11619 } while (0)
11620#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11621 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11622 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11623 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11625 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11626 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11627 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11628 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11629 } while (0)
11630#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11631 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11632 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11633 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11634 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11635 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11636 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11637 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11638 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11639 } while (0)
11640
11641#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11642 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11643 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11644 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11645 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11646 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11647 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11648 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11649 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11650 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11651 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11652 } while (0)
11653#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11654 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11655 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11656 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11657 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11658 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11659 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11660 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11661 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11662 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11663 } while (0)
11664#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11665 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11666 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11667 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11668 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11669 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11670 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11671 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11672 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11673 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11674 } while (0)
11675#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11676 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11677 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11678 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11679 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11680 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11681 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11682 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11683 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11684 } while (0)
11685
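/*
 * Note on the two flavours below: without IEM_WITH_SETJMP the memory fetch
 * macros wrap the status-code returning fetchers in IEM_MC_RETURN_ON_FAILURE,
 * while with IEM_WITH_SETJMP they expand to plain assignments from the *Jmp
 * variants, which report failures by longjmp'ing out of the instruction
 * instead of returning a VBOXSTRICTRC.
 */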
11686#ifndef IEM_WITH_SETJMP
11687# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11691# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11693#else
11694# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11695 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11697 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11698# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11699 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11700#endif
11701
11702#ifndef IEM_WITH_SETJMP
11703# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11707# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11709#else
11710# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11713 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11714# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11715 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11716#endif
11717
11718#ifndef IEM_WITH_SETJMP
11719# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11723# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11725#else
11726# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11729 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11730# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732#endif
11733
11734#ifdef SOME_UNUSED_FUNCTION
11735# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11737#endif
11738
11739#ifndef IEM_WITH_SETJMP
11740# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11742# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11744# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11748#else
11749# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11752 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11753# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11754 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757#endif
11758
11759#ifndef IEM_WITH_SETJMP
11760# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11762# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11766#else
11767# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11768 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11770 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11771# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11772 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11773#endif
11774
11775#ifndef IEM_WITH_SETJMP
11776# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11778# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11780#else
11781# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11782 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11783# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11784 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11785#endif
11786
11787#ifndef IEM_WITH_SETJMP
11788# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11790# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11792#else
11793# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11794 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11795# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11796 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11797#endif
11798
11799
11800
11801#ifndef IEM_WITH_SETJMP
11802# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint8_t u8Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u16Dst) = u8Tmp; \
11807 } while (0)
11808# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint8_t u8Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u32Dst) = u8Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint8_t u8Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u64Dst) = u8Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint16_t u16Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u32Dst) = u16Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint16_t u16Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u64Dst) = u16Tmp; \
11831 } while (0)
11832# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11833 do { \
11834 uint32_t u32Tmp; \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11836 (a_u64Dst) = u32Tmp; \
11837 } while (0)
11838#else /* IEM_WITH_SETJMP */
11839# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11848 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11849# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11851#endif /* IEM_WITH_SETJMP */
11852
11853#ifndef IEM_WITH_SETJMP
11854# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11855 do { \
11856 uint8_t u8Tmp; \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11858 (a_u16Dst) = (int8_t)u8Tmp; \
11859 } while (0)
11860# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11861 do { \
11862 uint8_t u8Tmp; \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11864 (a_u32Dst) = (int8_t)u8Tmp; \
11865 } while (0)
11866# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11867 do { \
11868 uint8_t u8Tmp; \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11870 (a_u64Dst) = (int8_t)u8Tmp; \
11871 } while (0)
11872# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11873 do { \
11874 uint16_t u16Tmp; \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11876 (a_u32Dst) = (int16_t)u16Tmp; \
11877 } while (0)
11878# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11879 do { \
11880 uint16_t u16Tmp; \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11882 (a_u64Dst) = (int16_t)u16Tmp; \
11883 } while (0)
11884# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11885 do { \
11886 uint32_t u32Tmp; \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11888 (a_u64Dst) = (int32_t)u32Tmp; \
11889 } while (0)
11890#else /* IEM_WITH_SETJMP */
11891# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11892 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11893# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11896 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11897# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11898 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11899# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11900 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11901# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11902 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11903#endif /* IEM_WITH_SETJMP */
11904
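/* Illustrative sketch (not part of the original file): how a decoder body might
 * use the zero/sign extending fetch macros above for the memory operand form
 * only, assuming the IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_STORE_GREG_U64,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END helpers defined elsewhere in this file;
 * the opcode function name is made up.
 *
 *   FNIEMOP_DEF(iemOp_example_movsx_Gv_Ew)               // hypothetical
 *   {
 *       uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *       IEM_MC_BEGIN(0, 2);
 *       IEM_MC_LOCAL(uint64_t, u64Value);
 *       IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *       IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *       IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u64Value);
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 *       return VINF_SUCCESS;
 *   }
 */
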
11905#ifndef IEM_WITH_SETJMP
11906# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11908# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11910# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11912# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11913 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11914#else
11915# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11916 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11917# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11918 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11919# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11920 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11921# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11922 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11923#endif
11924
11925#ifndef IEM_WITH_SETJMP
11926# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11928# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11929 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11930# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11932# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11934#else
11935# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11936 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11937# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11938 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11939# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11940 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11941# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11942 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11943#endif
11944
11945#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11946#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11947#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11948#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11949#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11950#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11951#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11952 do { \
11953 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11954 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11955 } while (0)
11956
11957#ifndef IEM_WITH_SETJMP
11958# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11959 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11960# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11961 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11962#else
11963# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11964 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11965# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11966 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11967#endif
11968
11969#ifndef IEM_WITH_SETJMP
11970# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11971 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11972# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11974#else
11975# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11976 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11977# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11978 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11979#endif
11980
11981
11982#define IEM_MC_PUSH_U16(a_u16Value) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11984#define IEM_MC_PUSH_U32(a_u32Value) \
11985 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11986#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11987 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11988#define IEM_MC_PUSH_U64(a_u64Value) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11990
11991#define IEM_MC_POP_U16(a_pu16Value) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11993#define IEM_MC_POP_U32(a_pu32Value) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11995#define IEM_MC_POP_U64(a_pu64Value) \
11996 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11997
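/* Illustrative sketch (not part of the original file): the stack macros above
 * in a minimal push handler, 64-bit operand path only.  The function name and
 * the IEM_MC_BEGIN/IEM_MC_LOCAL/IEM_MC_FETCH_GREG_U64/IEM_MC_ADVANCE_RIP
 * helpers (defined elsewhere in this file) are assumptions of the sketch.
 *
 *   FNIEMOP_DEF(iemOp_example_push_rBX)                  // hypothetical
 *   {
 *       IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
 *       IEM_MC_BEGIN(0, 1);
 *       IEM_MC_LOCAL(uint64_t, u64Value);
 *       IEM_MC_FETCH_GREG_U64(u64Value, X86_GREG_xBX);
 *       IEM_MC_PUSH_U64(u64Value);
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 *       return VINF_SUCCESS;
 *   }
 */
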
11998/** Maps guest memory for direct or bounce buffered access.
11999 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12000 * @remarks May return.
12001 */
12002#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12003 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12004
12005/** Maps guest memory for direct or bounce buffered access.
12006 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12007 * @remarks May return.
12008 */
12009#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12010 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12011
12012/** Commits the memory and unmaps the guest memory.
12013 * @remarks May return.
12014 */
12015#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12016 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12017
12018/** Commits the memory and unmaps the guest memory unless the FPU status word
12019 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
12020 * exception that would prevent the store.
12021 *
12022 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12023 * store, while \#P will not.
12024 *
12025 * @remarks May in theory return - for now.
12026 */
12027#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12028 do { \
12029 if ( !(a_u16FSW & X86_FSW_ES) \
12030 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12031 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12032 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12033 } while (0)
12034
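/* Illustrative sketch (not part of the original file): the usual read-modify-
 * write pattern built on IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP for a
 * binary op with a memory destination.  This is a fragment of a decoder body
 * where bRm has already been fetched; the worker name iemAImpl_add_u16 and the
 * IEM_MC_ARG/IEM_MC_ARG_LOCAL_EFLAGS/IEM_MC_COMMIT_EFLAGS helpers are assumed
 * from elsewhere in IEM.
 *
 *       IEM_MC_BEGIN(3, 2);
 *       IEM_MC_ARG(uint16_t *,       pu16Dst,         0);
 *       IEM_MC_ARG(uint16_t,         u16Src,          1);
 *       IEM_MC_ARG_LOCAL_EFLAGS(     pEFlags, EFlags, 2);
 *       IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *       IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *       IEMOP_HLP_DONE_DECODING();
 *       IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *       IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *       IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *       IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *       IEM_MC_COMMIT_EFLAGS(EFlags);
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 */
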
12035/** Calculate efficient address from R/M. */
12036#ifndef IEM_WITH_SETJMP
12037# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12038 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12039#else
12040# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12041 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12042#endif
12043
12044#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12045#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12046#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12047#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12048#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12049#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12050#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12051
12052/**
12053 * Defers the rest of the instruction emulation to a C implementation routine
12054 * and returns, only taking the standard parameters.
12055 *
12056 * @param a_pfnCImpl The pointer to the C routine.
12057 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12058 */
12059#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12060
12061/**
12062 * Defers the rest of instruction emulation to a C implementation routine and
12063 * returns, taking one argument in addition to the standard ones.
12064 *
12065 * @param a_pfnCImpl The pointer to the C routine.
12066 * @param a0 The argument.
12067 */
12068#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12069
12070/**
12071 * Defers the rest of the instruction emulation to a C implementation routine
12072 * and returns, taking two arguments in addition to the standard ones.
12073 *
12074 * @param a_pfnCImpl The pointer to the C routine.
12075 * @param a0 The first extra argument.
12076 * @param a1 The second extra argument.
12077 */
12078#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12079
12080/**
12081 * Defers the rest of the instruction emulation to a C implementation routine
12082 * and returns, taking three arguments in addition to the standard ones.
12083 *
12084 * @param a_pfnCImpl The pointer to the C routine.
12085 * @param a0 The first extra argument.
12086 * @param a1 The second extra argument.
12087 * @param a2 The third extra argument.
12088 */
12089#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12090
12091/**
12092 * Defers the rest of the instruction emulation to a C implementation routine
12093 * and returns, taking four arguments in addition to the standard ones.
12094 *
12095 * @param a_pfnCImpl The pointer to the C routine.
12096 * @param a0 The first extra argument.
12097 * @param a1 The second extra argument.
12098 * @param a2 The third extra argument.
12099 * @param a3 The fourth extra argument.
12100 */
12101#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12102
12103/**
12104 * Defers the rest of the instruction emulation to a C implementation routine
12105 * and returns, taking five arguments in addition to the standard ones.
12106 *
12107 * @param a_pfnCImpl The pointer to the C routine.
12108 * @param a0 The first extra argument.
12109 * @param a1 The second extra argument.
12110 * @param a2 The third extra argument.
12111 * @param a3 The fourth extra argument.
12112 * @param a4 The fifth extra argument.
12113 */
12114#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12115
12116/**
12117 * Defers the entire instruction emulation to a C implementation routine and
12118 * returns, only taking the standard parameters.
12119 *
12120 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12121 *
12122 * @param a_pfnCImpl The pointer to the C routine.
12123 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12124 */
12125#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12126
12127/**
12128 * Defers the entire instruction emulation to a C implementation routine and
12129 * returns, taking one argument in addition to the standard ones.
12130 *
12131 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12132 *
12133 * @param a_pfnCImpl The pointer to the C routine.
12134 * @param a0 The argument.
12135 */
12136#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12137
12138/**
12139 * Defers the entire instruction emulation to a C implementation routine and
12140 * returns, taking two arguments in addition to the standard ones.
12141 *
12142 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12143 *
12144 * @param a_pfnCImpl The pointer to the C routine.
12145 * @param a0 The first extra argument.
12146 * @param a1 The second extra argument.
12147 */
12148#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12149
12150/**
12151 * Defers the entire instruction emulation to a C implementation routine and
12152 * returns, taking three arguments in addition to the standard ones.
12153 *
12154 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12155 *
12156 * @param a_pfnCImpl The pointer to the C routine.
12157 * @param a0 The first extra argument.
12158 * @param a1 The second extra argument.
12159 * @param a2 The third extra argument.
12160 */
12161#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12162
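/* Illustrative sketch (not part of the original file): deferring a whole
 * instruction to a C implementation; iemCImpl_hlt is assumed to be the usual
 * IEM_CIMPL_DEF_0 style worker and the opcode function name is made up.  The
 * IEM_MC_CALL_CIMPL_n variants are used instead when locals and arguments have
 * already been set up inside an IEM_MC_BEGIN/IEM_MC_END block.
 *
 *   FNIEMOP_DEF(iemOp_example_hlt)                       // hypothetical
 *   {
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *   }
 */
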
12163/**
12164 * Calls an FPU assembly implementation taking one visible argument.
12165 *
12166 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12167 * @param a0 The first extra argument.
12168 */
12169#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12170 do { \
12171 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12172 } while (0)
12173
12174/**
12175 * Calls an FPU assembly implementation taking two visible arguments.
12176 *
12177 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12178 * @param a0 The first extra argument.
12179 * @param a1 The second extra argument.
12180 */
12181#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12182 do { \
12183 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12184 } while (0)
12185
12186/**
12187 * Calls an FPU assembly implementation taking three visible arguments.
12188 *
12189 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12190 * @param a0 The first extra argument.
12191 * @param a1 The second extra argument.
12192 * @param a2 The third extra argument.
12193 */
12194#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12195 do { \
12196 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12197 } while (0)
12198
12199#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12200 do { \
12201 (a_FpuData).FSW = (a_FSW); \
12202 (a_FpuData).r80Result = *(a_pr80Value); \
12203 } while (0)
12204
12205/** Pushes FPU result onto the stack. */
12206#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12207 iemFpuPushResult(pVCpu, &a_FpuData)
12208/** Pushes FPU result onto the stack and sets the FPUDP. */
12209#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12210 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12211
12212/** Replaces ST0 with the first result value and pushes the second onto the FPU stack. */
12213#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12214 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12215
12216/** Stores FPU result in a stack register. */
12217#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12218 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12219/** Stores FPU result in a stack register and pops the stack. */
12220#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12221 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12222/** Stores FPU result in a stack register and sets the FPUDP. */
12223#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12224 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12225/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12226 * stack. */
12227#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12228 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12229
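/* Illustrative sketch (not part of the original file): how the result macros
 * above combine with the stack-check and underflow helpers further down for a
 * typical "fop m32fp" style instruction.  This is a fragment where bRm has
 * already been fetched; the worker iemAImpl_fadd_r80_by_r32 and the IEM_MC_*
 * helpers not defined in this block are assumed from elsewhere in IEM.
 *
 *       IEM_MC_BEGIN(3, 3);
 *       IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
 *       IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *       IEM_MC_LOCAL(RTFLOAT32U,            r32Val);
 *       IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,  FpuRes, 0);
 *       IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,       1);
 *       IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2, r32Val, 2);
 *       IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *       IEM_MC_FETCH_MEM_R32(r32Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *       IEM_MC_PREPARE_FPU_USAGE();
 *       IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
 *           IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r32, pFpuRes, pr80Value1, pr32Val2);
 *           IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *       IEM_MC_ELSE()
 *           IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *       IEM_MC_ENDIF();
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 */
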
12230/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12231#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12232 iemFpuUpdateOpcodeAndIp(pVCpu)
12233/** Free a stack register (for FFREE and FFREEP). */
12234#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12235 iemFpuStackFree(pVCpu, a_iStReg)
12236/** Increment the FPU stack pointer. */
12237#define IEM_MC_FPU_STACK_INC_TOP() \
12238 iemFpuStackIncTop(pVCpu)
12239/** Decrement the FPU stack pointer. */
12240#define IEM_MC_FPU_STACK_DEC_TOP() \
12241 iemFpuStackDecTop(pVCpu)
12242
12243/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12244#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12245 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12246/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12247#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12248 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12249/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12250#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12251 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12252/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12253#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12254 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12255/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12256 * stack. */
12257#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12258 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12259/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12260#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12261 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12262
12263/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12264#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12265 iemFpuStackUnderflow(pVCpu, a_iStDst)
12266/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12267 * stack. */
12268#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12269 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12270/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12271 * FPUDS. */
12272#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12273 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12274/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12275 * FPUDS. Pops stack. */
12276#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12277 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12278/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12279 * stack twice. */
12280#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12281 iemFpuStackUnderflowThenPopPop(pVCpu)
12282/** Raises a FPU stack underflow exception for an instruction pushing a result
12283 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12284#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12285 iemFpuStackPushUnderflow(pVCpu)
12286/** Raises a FPU stack underflow exception for an instruction pushing a result
12287 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12288#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12289 iemFpuStackPushUnderflowTwo(pVCpu)
12290
12291/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12292 * FPUIP, FPUCS and FOP. */
12293#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12294 iemFpuStackPushOverflow(pVCpu)
12295/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12296 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12297#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12298 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12299/** Prepares for using the FPU state.
12300 * Ensures that we can use the host FPU in the current context (RC+R0).
12301 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12302#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12303/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12304#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12305/** Actualizes the guest FPU state so it can be accessed and modified. */
12306#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12307
12308/** Prepares for using the SSE state.
12309 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12310 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12311#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12312/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12313#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12314/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12315#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12316
12317/** Prepares for using the AVX state.
12318 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12319 * Ensures the guest AVX state in the CPUMCTX is up to date.
12320 * @note This will include the AVX512 state too when support for it is added
12321 * due to the zero-extending behaviour of VEX encoded instructions. */
12322#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12323/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12324#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12325/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12326#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12327
12328/**
12329 * Calls an MMX assembly implementation taking two visible arguments.
12330 *
12331 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12332 * @param a0 The first extra argument.
12333 * @param a1 The second extra argument.
12334 */
12335#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12336 do { \
12337 IEM_MC_PREPARE_FPU_USAGE(); \
12338 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12339 } while (0)
12340
12341/**
12342 * Calls an MMX assembly implementation taking three visible arguments.
12343 *
12344 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12345 * @param a0 The first extra argument.
12346 * @param a1 The second extra argument.
12347 * @param a2 The third extra argument.
12348 */
12349#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12350 do { \
12351 IEM_MC_PREPARE_FPU_USAGE(); \
12352 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12353 } while (0)
12354
12355
12356/**
12357 * Calls an SSE assembly implementation taking two visible arguments.
12358 *
12359 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12360 * @param a0 The first extra argument.
12361 * @param a1 The second extra argument.
12362 */
12363#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12364 do { \
12365 IEM_MC_PREPARE_SSE_USAGE(); \
12366 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12367 } while (0)
12368
12369/**
12370 * Calls an SSE assembly implementation taking three visible arguments.
12371 *
12372 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12373 * @param a0 The first extra argument.
12374 * @param a1 The second extra argument.
12375 * @param a2 The third extra argument.
12376 */
12377#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12378 do { \
12379 IEM_MC_PREPARE_SSE_USAGE(); \
12380 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12381 } while (0)
12382
12383
12384/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12385 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12386#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12387 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12388
12389/**
12390 * Calls an AVX assembly implementation taking two visible arguments.
12391 *
12392 * There is one implicit zeroth argument, a pointer to the extended state.
12393 *
12394 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12395 * @param a1 The first extra argument.
12396 * @param a2 The second extra argument.
12397 */
12398#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12399 do { \
12400 IEM_MC_PREPARE_AVX_USAGE(); \
12401 a_pfnAImpl(pXState, (a1), (a2)); \
12402 } while (0)
12403
12404/**
12405 * Calls an AVX assembly implementation taking three visible arguments.
12406 *
12407 * There is one implicit zeroth argument, a pointer to the extended state.
12408 *
12409 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12410 * @param a1 The first extra argument.
12411 * @param a2 The second extra argument.
12412 * @param a3 The third extra argument.
12413 */
12414#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12415 do { \
12416 IEM_MC_PREPARE_AVX_USAGE(); \
12417 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12418 } while (0)
12419
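/* Illustrative sketch (not part of the original file): the implicit pXState
 * argument plus two register indices handed to an AVX worker.  The worker name
 * iemAImpl_exampleAvxWorker is made up and the register indices are hardcoded
 * to keep the sketch self contained.
 *
 *       IEM_MC_BEGIN(3, 0);
 *       IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                // argument 0: pXState
 *       IEM_MC_ARG_CONST(uint8_t, iYRegDst, 0, 1);       // YMM0 as destination
 *       IEM_MC_ARG_CONST(uint8_t, iYRegSrc, 1, 2);       // YMM1 as source
 *       IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_exampleAvxWorker, iYRegDst, iYRegSrc);
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 */
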
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12428/** @note Not for IOPL or IF testing. */
12429#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12430 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12431 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12432/** @note Not for IOPL or IF testing. */
12433#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12434 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12435 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12436/** @note Not for IOPL or IF testing. */
12437#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12438 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12439 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12440 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12441/** @note Not for IOPL or IF testing. */
12442#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12443 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12444 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12445 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12446#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12447#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12448#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12449/** @note Not for IOPL or IF testing. */
12450#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12451 if ( pVCpu->cpum.GstCtx.cx != 0 \
12452 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12453/** @note Not for IOPL or IF testing. */
12454#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12455 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12456 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12457/** @note Not for IOPL or IF testing. */
12458#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12459 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12460 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12461/** @note Not for IOPL or IF testing. */
12462#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12463 if ( pVCpu->cpum.GstCtx.cx != 0 \
12464 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12465/** @note Not for IOPL or IF testing. */
12466#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12467 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12468 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12469/** @note Not for IOPL or IF testing. */
12470#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12471 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12472 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12473#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12474#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12475
12476#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12477 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12478#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12479 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12480#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12481 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12482#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12483 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12484#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12485 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12486#define IEM_MC_IF_FCW_IM() \
12487 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12488
12489#define IEM_MC_ELSE() } else {
12490#define IEM_MC_ENDIF() } do {} while (0)
12491
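/* Illustrative sketch (not part of the original file): the EFLAGS test macros
 * open if/else blocks that must be closed with IEM_MC_ENDIF(), e.g. for a
 * cmovz-like register form (register indices hardcoded here; the GREG fetch,
 * store and clear-high helpers are assumed from elsewhere in this file):
 *
 *       IEM_MC_BEGIN(0, 1);
 *       IEM_MC_LOCAL(uint32_t, u32Value);
 *       IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *           IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xCX);
 *           IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Value);
 *       IEM_MC_ELSE()
 *           IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX);    // cmov zero extends even when not moving
 *       IEM_MC_ENDIF();
 *       IEM_MC_ADVANCE_RIP();
 *       IEM_MC_END();
 */
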
12492/** @} */
12493
12494
12495/** @name Opcode Debug Helpers.
12496 * @{
12497 */
12498#ifdef VBOX_WITH_STATISTICS
12499# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12500#else
12501# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12502#endif
12503
12504#ifdef DEBUG
12505# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12506 do { \
12507 IEMOP_INC_STATS(a_Stats); \
12508 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12509 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12510 } while (0)
12511
12512# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12513 do { \
12514 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12515 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12516 (void)RT_CONCAT(OP_,a_Upper); \
12517 (void)(a_fDisHints); \
12518 (void)(a_fIemHints); \
12519 } while (0)
12520
12521# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12522 do { \
12523 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12524 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12525 (void)RT_CONCAT(OP_,a_Upper); \
12526 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12527 (void)(a_fDisHints); \
12528 (void)(a_fIemHints); \
12529 } while (0)
12530
12531# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12532 do { \
12533 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12534 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12535 (void)RT_CONCAT(OP_,a_Upper); \
12536 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12537 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12538 (void)(a_fDisHints); \
12539 (void)(a_fIemHints); \
12540 } while (0)
12541
12542# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12543 do { \
12544 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12545 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12546 (void)RT_CONCAT(OP_,a_Upper); \
12547 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12548 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12549 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12550 (void)(a_fDisHints); \
12551 (void)(a_fIemHints); \
12552 } while (0)
12553
12554# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12555 do { \
12556 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12557 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12558 (void)RT_CONCAT(OP_,a_Upper); \
12559 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12560 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12561 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12562 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12563 (void)(a_fDisHints); \
12564 (void)(a_fIemHints); \
12565 } while (0)
12566
12567#else
12568# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12569
12570# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12572# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12574# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12575 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12576# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12577 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12578# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12579 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12580
12581#endif
12582
12583#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12584 IEMOP_MNEMONIC0EX(a_Lower, \
12585 #a_Lower, \
12586 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12587#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12588 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12589 #a_Lower " " #a_Op1, \
12590 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12591#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12592 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12593 #a_Lower " " #a_Op1 "," #a_Op2, \
12594 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12595#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12596 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12597 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12598 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12599#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12600 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12601 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12602 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12603
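/* Illustrative example (not part of the original file): a two operand mnemonic
 * declaration as it would sit at the top of an opcode decoder, assuming the
 * IEMOPFORM_RM, OP_MOVZX, OP_PARM_Gv/Eb and DISOPTYPE_HARMLESS constants used
 * by the decoder files:
 *
 *   IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 *
 * In debug builds this bumps the movzx_Gv_Eb statistics counter (when
 * VBOX_WITH_STATISTICS is defined) and emits the Log4 decode line with CS:RIP,
 * the mnemonic and the instruction count; in release builds only the
 * statistics bump remains.
 */
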
12604/** @} */
12605
12606
12607/** @name Opcode Helpers.
12608 * @{
12609 */
12610
12611#ifdef IN_RING3
12612# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12613 do { \
12614 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12615 else \
12616 { \
12617 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12618 return IEMOP_RAISE_INVALID_OPCODE(); \
12619 } \
12620 } while (0)
12621#else
12622# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12623 do { \
12624 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12625 else return IEMOP_RAISE_INVALID_OPCODE(); \
12626 } while (0)
12627#endif
12628
12629/** The instruction requires a 186 or later. */
12630#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12631# define IEMOP_HLP_MIN_186() do { } while (0)
12632#else
12633# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12634#endif
12635
12636/** The instruction requires a 286 or later. */
12637#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12638# define IEMOP_HLP_MIN_286() do { } while (0)
12639#else
12640# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12641#endif
12642
12643/** The instruction requires a 386 or later. */
12644#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12645# define IEMOP_HLP_MIN_386() do { } while (0)
12646#else
12647# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12648#endif
12649
12650/** The instruction requires a 386 or later if the given expression is true. */
12651#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12652# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12653#else
12654# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12655#endif
12656
12657/** The instruction requires a 486 or later. */
12658#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12659# define IEMOP_HLP_MIN_486() do { } while (0)
12660#else
12661# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12662#endif
12663
12664/** The instruction requires a Pentium (586) or later. */
12665#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12666# define IEMOP_HLP_MIN_586() do { } while (0)
12667#else
12668# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12669#endif
12670
12671/** The instruction requires a PentiumPro (686) or later. */
12672#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12673# define IEMOP_HLP_MIN_686() do { } while (0)
12674#else
12675# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12676#endif
12677
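/* Illustrative sketch (not part of the original file): the minimum CPU checks
 * go right after the mnemonic declaration in a decoder; iemCImpl_cpuid and the
 * opcode function name are assumptions of the sketch.
 *
 *   FNIEMOP_DEF(iemOp_example_cpuid)                     // hypothetical
 *   {
 *       IEMOP_MNEMONIC(cpuid, "cpuid");
 *       IEMOP_HLP_MIN_486();                             // CPUID first appeared on late 486 models
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
 *   }
 */
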
12678
12679/** The instruction raises an \#UD in real and V8086 mode. */
12680#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12681 do \
12682 { \
12683 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12684 else return IEMOP_RAISE_INVALID_OPCODE(); \
12685 } while (0)
12686
12687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12688/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12689 * without a 64-bit code segment (applicable to all VMX instructions
12690 * except VMCALL).
12691 */
12692#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12693 do \
12694 { \
12695 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12696 && ( !IEM_IS_LONG_MODE(pVCpu) \
12697 || IEM_IS_64BIT_CODE(pVCpu))) \
12698 { /* likely */ } \
12699 else \
12700 { \
12701 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12702 { \
12703 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12704 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12705 return IEMOP_RAISE_INVALID_OPCODE(); \
12706 } \
12707 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12708 { \
12709 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12710 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12711 return IEMOP_RAISE_INVALID_OPCODE(); \
12712 } \
12713 } \
12714 } while (0)
12715
12716/** The instruction can only be executed in VMX operation (VMX root mode and
12717 * non-root mode).
12718 *
12719 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12720 */
12721# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12722 do \
12723 { \
12724 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12725 else \
12726 { \
12727 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12728 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12729 return IEMOP_RAISE_INVALID_OPCODE(); \
12730 } \
12731 } while (0)
12732#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12733
12734/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12735 * 64-bit mode. */
12736#define IEMOP_HLP_NO_64BIT() \
12737 do \
12738 { \
12739 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12740 return IEMOP_RAISE_INVALID_OPCODE(); \
12741 } while (0)
12742
12743/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12744 * 64-bit mode. */
12745#define IEMOP_HLP_ONLY_64BIT() \
12746 do \
12747 { \
12748 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12749 return IEMOP_RAISE_INVALID_OPCODE(); \
12750 } while (0)
12751
12752/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12753#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12754 do \
12755 { \
12756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12757 iemRecalEffOpSize64Default(pVCpu); \
12758 } while (0)
12759
12760/** The instruction has 64-bit operand size in 64-bit mode. */
12761#define IEMOP_HLP_64BIT_OP_SIZE() \
12762 do \
12763 { \
12764 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12765 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12766 } while (0)
12767
12768/** Only a REX prefix immediately preceding the first opcode byte takes
12769 * effect. This macro helps ensure this and logs bad guest code. */
12770#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12771 do \
12772 { \
12773 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12774 { \
12775 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12776 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12777 pVCpu->iem.s.uRexB = 0; \
12778 pVCpu->iem.s.uRexIndex = 0; \
12779 pVCpu->iem.s.uRexReg = 0; \
12780 iemRecalEffOpSize(pVCpu); \
12781 } \
12782 } while (0)
12783
12784/**
12785 * Done decoding.
12786 */
12787#define IEMOP_HLP_DONE_DECODING() \
12788 do \
12789 { \
12790 /*nothing for now, maybe later... */ \
12791 } while (0)
12792
12793/**
12794 * Done decoding, raise \#UD exception if lock prefix present.
12795 */
12796#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12797 do \
12798 { \
12799 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12800 { /* likely */ } \
12801 else \
12802 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12803 } while (0)
12804
12805
12806/**
12807 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12808 * repnz or size prefixes are present, or if in real or v8086 mode.
12809 */
12810#define IEMOP_HLP_DONE_VEX_DECODING() \
12811 do \
12812 { \
12813 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12814 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12815 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12816 { /* likely */ } \
12817 else \
12818 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12819 } while (0)
12820
12821/**
12822 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12823 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12824 */
12825#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12826 do \
12827 { \
12828 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12829 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12830 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12831 && pVCpu->iem.s.uVexLength == 0)) \
12832 { /* likely */ } \
12833 else \
12834 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12835 } while (0)
12836
12837
12838/**
12839 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12840 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12841 * register 0, or if in real or v8086 mode.
12842 */
12843#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12844 do \
12845 { \
12846 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12847 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12848 && !pVCpu->iem.s.uVex3rdReg \
12849 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12850 { /* likely */ } \
12851 else \
12852 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12853 } while (0)
12854
12855/**
12856 * Done decoding VEX, no V, L=0.
12857 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12858 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12859 */
12860#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12861 do \
12862 { \
12863 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12864 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12865 && pVCpu->iem.s.uVexLength == 0 \
12866 && pVCpu->iem.s.uVex3rdReg == 0 \
12867 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12868 { /* likely */ } \
12869 else \
12870 return IEMOP_RAISE_INVALID_OPCODE(); \
12871 } while (0)
12872
12873#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12874 do \
12875 { \
12876 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12877 { /* likely */ } \
12878 else \
12879 { \
12880 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12881 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12882 } \
12883 } while (0)
12884#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12885 do \
12886 { \
12887 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12888 { /* likely */ } \
12889 else \
12890 { \
12891 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12892 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12893 } \
12894 } while (0)
12895
12896/**
12897 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12898 * are present.
12899 */
12900#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12901 do \
12902 { \
12903 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12904 { /* likely */ } \
12905 else \
12906 return IEMOP_RAISE_INVALID_OPCODE(); \
12907 } while (0)
12908
12909/**
12910 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12911 * prefixes are present.
12912 */
12913#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12914 do \
12915 { \
12916 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12917 { /* likely */ } \
12918 else \
12919 return IEMOP_RAISE_INVALID_OPCODE(); \
12920 } while (0)
12921
12922
12923/**
12924 * Calculates the effective address of a ModR/M memory operand.
12925 *
12926 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12927 *
12928 * @return Strict VBox status code.
12929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12930 * @param bRm The ModRM byte.
12931 * @param cbImm The size of any immediate following the
12932 * effective address opcode bytes. Important for
12933 * RIP relative addressing.
12934 * @param pGCPtrEff Where to return the effective address.
12935 */
12936IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12937{
12938 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12939# define SET_SS_DEF() \
12940 do \
12941 { \
12942 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12943 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12944 } while (0)
12945
12946 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12947 {
12948/** @todo Check the effective address size crap! */
12949 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12950 {
12951 uint16_t u16EffAddr;
12952
12953 /* Handle the disp16 form with no registers first. */
12954 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12955 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12956 else
12957 {
12958                /* Get the displacement. */
12959 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12960 {
12961 case 0: u16EffAddr = 0; break;
12962 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12963 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12964 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12965 }
12966
12967 /* Add the base and index registers to the disp. */
12968 switch (bRm & X86_MODRM_RM_MASK)
12969 {
12970 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12971 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12972 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12973 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12974 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12975 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12976 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12977 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12978 }
12979 }
12980
12981 *pGCPtrEff = u16EffAddr;
12982 }
12983 else
12984 {
12985 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12986 uint32_t u32EffAddr;
12987
12988 /* Handle the disp32 form with no registers first. */
12989 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12990 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12991 else
12992 {
12993 /* Get the register (or SIB) value. */
12994 switch ((bRm & X86_MODRM_RM_MASK))
12995 {
12996 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12997 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12998 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12999 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13000 case 4: /* SIB */
13001 {
13002 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13003
13004 /* Get the index and scale it. */
13005 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13006 {
13007 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13008 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13009 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13010 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13011 case 4: u32EffAddr = 0; /*none */ break;
13012 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13013 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13014 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13016 }
13017 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13018
13019 /* add base */
13020 switch (bSib & X86_SIB_BASE_MASK)
13021 {
13022 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13023 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13024 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13025 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13026 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13027 case 5:
13028 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13029 {
13030 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13031 SET_SS_DEF();
13032 }
13033 else
13034 {
13035 uint32_t u32Disp;
13036 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13037 u32EffAddr += u32Disp;
13038 }
13039 break;
13040 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13041 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13043 }
13044 break;
13045 }
13046 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13047 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13048 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13049 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13050 }
13051
13052 /* Get and add the displacement. */
13053 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13054 {
13055 case 0:
13056 break;
13057 case 1:
13058 {
13059 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13060 u32EffAddr += i8Disp;
13061 break;
13062 }
13063 case 2:
13064 {
13065 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13066 u32EffAddr += u32Disp;
13067 break;
13068 }
13069 default:
13070 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13071 }
13072
13073 }
13074 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13075 *pGCPtrEff = u32EffAddr;
13076 else
13077 {
13078 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13079 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13080 }
13081 }
13082 }
13083 else
13084 {
13085 uint64_t u64EffAddr;
13086
13087 /* Handle the rip+disp32 form with no registers first. */
13088 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13089 {
13090 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13091 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13092 }
13093 else
13094 {
13095 /* Get the register (or SIB) value. */
13096 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13097 {
13098 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13099 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13100 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13101 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13102 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13103 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13104 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13105 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13106 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13107 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13108 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13109 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13110 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13111 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13112 /* SIB */
13113 case 4:
13114 case 12:
13115 {
13116 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13117
13118 /* Get the index and scale it. */
13119 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13120 {
13121 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13122 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13123 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13124 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13125 case 4: u64EffAddr = 0; /*none */ break;
13126 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13127 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13128 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13129 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13130 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13131 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13132 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13133 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13134 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13135 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13136 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13137 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13138 }
13139 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13140
13141 /* add base */
13142 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13143 {
13144 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13145 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13146 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13147 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13148 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13149 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13150 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13151 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13152 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13153 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13154 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13155 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13156 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13157 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13158 /* complicated encodings */
13159 case 5:
13160 case 13:
13161 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13162 {
13163 if (!pVCpu->iem.s.uRexB)
13164 {
13165 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13166 SET_SS_DEF();
13167 }
13168 else
13169 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13170 }
13171 else
13172 {
13173 uint32_t u32Disp;
13174 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13175 u64EffAddr += (int32_t)u32Disp;
13176 }
13177 break;
13178 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13179 }
13180 break;
13181 }
13182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13183 }
13184
13185 /* Get and add the displacement. */
13186 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13187 {
13188 case 0:
13189 break;
13190 case 1:
13191 {
13192 int8_t i8Disp;
13193 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13194 u64EffAddr += i8Disp;
13195 break;
13196 }
13197 case 2:
13198 {
13199 uint32_t u32Disp;
13200 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13201 u64EffAddr += (int32_t)u32Disp;
13202 break;
13203 }
13204 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13205 }
13206
13207 }
13208
13209 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13210 *pGCPtrEff = u64EffAddr;
13211 else
13212 {
13213 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13214 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13215 }
13216 }
13217
13218 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13219 return VINF_SUCCESS;
13220}
13221
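/*
 * Illustrative walk-through of the 32-bit SIB path above (not part of the
 * decoder itself; the register values are assumptions).  For a guest
 * instruction like
 *
 *      mov eax, [ebx + esi*4 + 8]      ; ModRM=0x44 (mod=01, reg=eax, rm=SIB)
 *                                      ; SIB=0xB3   (scale=4, index=esi, base=ebx)
 *
 * with esi=0x10 and ebx=0x1000, the code computes
 *
 *      u32EffAddr  = esi << 2;         // 0x00000040
 *      u32EffAddr += ebx;              // 0x00001040
 *      u32EffAddr += (int8_t)8;        // 0x00001048 (mod=01 disp8)
 *
 * and leaves the default segment at DS since neither EBP nor ESP is the base.
 */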
13222
13223/**
13224 * Calculates the effective address of a ModR/M memory operand, applying @a offRsp when ESP/RSP is the SIB base register.
13225 *
13226 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13227 *
13228 * @return Strict VBox status code.
13229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13230 * @param bRm The ModRM byte.
13231 * @param cbImm The size of any immediate following the
13232 * effective address opcode bytes. Important for
13233 * RIP relative addressing.
13234 * @param pGCPtrEff Where to return the effective address.
13235 * @param offRsp RSP displacement.
13236 */
13237IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13238{
13239    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13240# define SET_SS_DEF() \
13241 do \
13242 { \
13243 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13244 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13245 } while (0)
13246
13247 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13248 {
13249/** @todo Check the effective address size crap! */
13250 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13251 {
13252 uint16_t u16EffAddr;
13253
13254 /* Handle the disp16 form with no registers first. */
13255 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13256 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13257 else
13258 {
13259                /* Get the displacement. */
13260 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13261 {
13262 case 0: u16EffAddr = 0; break;
13263 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13264 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13265 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13266 }
13267
13268 /* Add the base and index registers to the disp. */
13269 switch (bRm & X86_MODRM_RM_MASK)
13270 {
13271 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13272 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13273 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13274 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13275 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13276 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13277 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13278 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13279 }
13280 }
13281
13282 *pGCPtrEff = u16EffAddr;
13283 }
13284 else
13285 {
13286 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13287 uint32_t u32EffAddr;
13288
13289 /* Handle the disp32 form with no registers first. */
13290 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13291 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13292 else
13293 {
13294 /* Get the register (or SIB) value. */
13295 switch ((bRm & X86_MODRM_RM_MASK))
13296 {
13297 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13298 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13299 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13300 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13301 case 4: /* SIB */
13302 {
13303 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13304
13305 /* Get the index and scale it. */
13306 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13307 {
13308 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13309 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13310 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13311 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13312 case 4: u32EffAddr = 0; /*none */ break;
13313 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13314 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13315 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13316 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13317 }
13318 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13319
13320 /* add base */
13321 switch (bSib & X86_SIB_BASE_MASK)
13322 {
13323 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13324 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13325 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13326 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13327 case 4:
13328 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13329 SET_SS_DEF();
13330 break;
13331 case 5:
13332 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13333 {
13334 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13335 SET_SS_DEF();
13336 }
13337 else
13338 {
13339 uint32_t u32Disp;
13340 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13341 u32EffAddr += u32Disp;
13342 }
13343 break;
13344 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13345 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13346 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13347 }
13348 break;
13349 }
13350 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13351 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13352 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13354 }
13355
13356 /* Get and add the displacement. */
13357 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13358 {
13359 case 0:
13360 break;
13361 case 1:
13362 {
13363 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13364 u32EffAddr += i8Disp;
13365 break;
13366 }
13367 case 2:
13368 {
13369 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13370 u32EffAddr += u32Disp;
13371 break;
13372 }
13373 default:
13374 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13375 }
13376
13377 }
13378 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13379 *pGCPtrEff = u32EffAddr;
13380 else
13381 {
13382 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13383 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13384 }
13385 }
13386 }
13387 else
13388 {
13389 uint64_t u64EffAddr;
13390
13391 /* Handle the rip+disp32 form with no registers first. */
13392 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13393 {
13394 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13395 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13396 }
13397 else
13398 {
13399 /* Get the register (or SIB) value. */
13400 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13401 {
13402 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13403 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13404 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13405 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13406 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13407 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13408 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13409 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13410 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13411 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13412 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13413 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13414 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13415 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13416 /* SIB */
13417 case 4:
13418 case 12:
13419 {
13420 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13421
13422 /* Get the index and scale it. */
13423 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13424 {
13425 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13426 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13427 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13428 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13429 case 4: u64EffAddr = 0; /*none */ break;
13430 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13431 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13432 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13433 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13434 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13435 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13436 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13437 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13438 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13439 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13440 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13441 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13442 }
13443 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13444
13445 /* add base */
13446 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13447 {
13448 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13449 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13450 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13451 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13452 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13453 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13454 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13455 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13456 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13457 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13458 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13459 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13460 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13461 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13462 /* complicated encodings */
13463 case 5:
13464 case 13:
13465 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13466 {
13467 if (!pVCpu->iem.s.uRexB)
13468 {
13469 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13470 SET_SS_DEF();
13471 }
13472 else
13473 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13474 }
13475 else
13476 {
13477 uint32_t u32Disp;
13478 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13479 u64EffAddr += (int32_t)u32Disp;
13480 }
13481 break;
13482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13483 }
13484 break;
13485 }
13486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13487 }
13488
13489 /* Get and add the displacement. */
13490 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13491 {
13492 case 0:
13493 break;
13494 case 1:
13495 {
13496 int8_t i8Disp;
13497 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13498 u64EffAddr += i8Disp;
13499 break;
13500 }
13501 case 2:
13502 {
13503 uint32_t u32Disp;
13504 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13505 u64EffAddr += (int32_t)u32Disp;
13506 break;
13507 }
13508 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13509 }
13510
13511 }
13512
13513 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13514 *pGCPtrEff = u64EffAddr;
13515 else
13516 {
13517 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13518 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13519 }
13520 }
13521
13522    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13523 return VINF_SUCCESS;
13524}
13525
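/*
 * A minimal sketch of how the extended variant above might be used (the
 * values are assumptions, not taken from any caller): a non-zero offRsp lets
 * the caller resolve an RSP/ESP based operand against a stack pointer that
 * has already been adjusted, e.g. compensating for an assumed 8 byte push
 * before computing [rsp+disp]:
 *
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImm, &GCPtrEff, 8);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */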
13526
13527#ifdef IEM_WITH_SETJMP
13528/**
13529 * Calculates the effective address of a ModR/M memory operand.
13530 *
13531 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13532 *
13533 * May longjmp on internal error.
13534 *
13535 * @return The effective address.
13536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13537 * @param bRm The ModRM byte.
13538 * @param cbImm The size of any immediate following the
13539 * effective address opcode bytes. Important for
13540 * RIP relative addressing.
13541 */
13542IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13543{
13544 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13545# define SET_SS_DEF() \
13546 do \
13547 { \
13548 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13549 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13550 } while (0)
13551
13552 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13553 {
13554/** @todo Check the effective address size crap! */
13555 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13556 {
13557 uint16_t u16EffAddr;
13558
13559 /* Handle the disp16 form with no registers first. */
13560 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13561 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13562 else
13563 {
13564                /* Get the displacement. */
13565 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13566 {
13567 case 0: u16EffAddr = 0; break;
13568 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13569 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13570 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13571 }
13572
13573 /* Add the base and index registers to the disp. */
13574 switch (bRm & X86_MODRM_RM_MASK)
13575 {
13576 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13577 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13578 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13579 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13580 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13581 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13582 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13583 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13584 }
13585 }
13586
13587 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13588 return u16EffAddr;
13589 }
13590
13591 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13592 uint32_t u32EffAddr;
13593
13594 /* Handle the disp32 form with no registers first. */
13595 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13596 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13597 else
13598 {
13599 /* Get the register (or SIB) value. */
13600 switch ((bRm & X86_MODRM_RM_MASK))
13601 {
13602 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13603 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13604 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13605 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13606 case 4: /* SIB */
13607 {
13608 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13609
13610 /* Get the index and scale it. */
13611 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13612 {
13613 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13614 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13615 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13616 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13617 case 4: u32EffAddr = 0; /*none */ break;
13618 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13619 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13620 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13621 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13622 }
13623 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13624
13625 /* add base */
13626 switch (bSib & X86_SIB_BASE_MASK)
13627 {
13628 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13629 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13630 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13631 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13632 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13633 case 5:
13634 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13635 {
13636 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13637 SET_SS_DEF();
13638 }
13639 else
13640 {
13641 uint32_t u32Disp;
13642 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13643 u32EffAddr += u32Disp;
13644 }
13645 break;
13646 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13647 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13648 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13649 }
13650 break;
13651 }
13652 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13653 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13654 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13655 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13656 }
13657
13658 /* Get and add the displacement. */
13659 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13660 {
13661 case 0:
13662 break;
13663 case 1:
13664 {
13665 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13666 u32EffAddr += i8Disp;
13667 break;
13668 }
13669 case 2:
13670 {
13671 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13672 u32EffAddr += u32Disp;
13673 break;
13674 }
13675 default:
13676 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13677 }
13678 }
13679
13680 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13681 {
13682 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13683 return u32EffAddr;
13684 }
13685 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13686 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13687 return u32EffAddr & UINT16_MAX;
13688 }
13689
13690 uint64_t u64EffAddr;
13691
13692 /* Handle the rip+disp32 form with no registers first. */
13693 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13694 {
13695 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13696 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13697 }
13698 else
13699 {
13700 /* Get the register (or SIB) value. */
13701 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13702 {
13703 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13704 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13705 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13706 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13707 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13708 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13709 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13710 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13711 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13712 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13713 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13714 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13715 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13716 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13717 /* SIB */
13718 case 4:
13719 case 12:
13720 {
13721 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13722
13723 /* Get the index and scale it. */
13724 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13725 {
13726 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13727 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13728 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13729 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13730 case 4: u64EffAddr = 0; /*none */ break;
13731 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13732 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13733 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13734 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13735 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13736 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13737 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13738 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13739 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13740 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13741 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13742 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13743 }
13744 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13745
13746 /* add base */
13747 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13748 {
13749 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13750 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13751 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13752 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13753 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13754 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13755 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13756 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13757 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13758 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13759 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13760 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13761 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13762 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13763 /* complicated encodings */
13764 case 5:
13765 case 13:
13766 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13767 {
13768 if (!pVCpu->iem.s.uRexB)
13769 {
13770 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13771 SET_SS_DEF();
13772 }
13773 else
13774 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13775 }
13776 else
13777 {
13778 uint32_t u32Disp;
13779 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13780 u64EffAddr += (int32_t)u32Disp;
13781 }
13782 break;
13783 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13784 }
13785 break;
13786 }
13787 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13788 }
13789
13790 /* Get and add the displacement. */
13791 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13792 {
13793 case 0:
13794 break;
13795 case 1:
13796 {
13797 int8_t i8Disp;
13798 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13799 u64EffAddr += i8Disp;
13800 break;
13801 }
13802 case 2:
13803 {
13804 uint32_t u32Disp;
13805 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13806 u64EffAddr += (int32_t)u32Disp;
13807 break;
13808 }
13809 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13810 }
13811
13812 }
13813
13814 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13815 {
13816 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13817 return u64EffAddr;
13818 }
13819 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13820 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13821 return u64EffAddr & UINT32_MAX;
13822}
13823#endif /* IEM_WITH_SETJMP */
13824
13825/** @} */
13826
13827
13828
13829/*
13830 * Include the instructions
13831 */
13832#include "IEMAllInstructions.cpp.h"
13833
13834
13835
13836#ifdef LOG_ENABLED
13837/**
13838 * Logs the current instruction.
13839 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13840 * @param fSameCtx Set if we have the same context information as the VMM,
13841 * clear if we may have already executed an instruction in
13842 * our debug context. When clear, we assume IEMCPU holds
13843 * valid CPU mode info.
13844 *
13845 * The @a fSameCtx parameter is now misleading and obsolete.
13846 * @param pszFunction The IEM function doing the execution.
13847 */
13848IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13849{
13850# ifdef IN_RING3
13851 if (LogIs2Enabled())
13852 {
13853 char szInstr[256];
13854 uint32_t cbInstr = 0;
13855 if (fSameCtx)
13856 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13857 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13858 szInstr, sizeof(szInstr), &cbInstr);
13859 else
13860 {
13861 uint32_t fFlags = 0;
13862 switch (pVCpu->iem.s.enmCpuMode)
13863 {
13864 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13865 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13866 case IEMMODE_16BIT:
13867 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13868 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13869 else
13870 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13871 break;
13872 }
13873 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13874 szInstr, sizeof(szInstr), &cbInstr);
13875 }
13876
13877 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13878 Log2(("**** %s\n"
13879 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13880 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13881 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13882 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13883 " %s\n"
13884 , pszFunction,
13885 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13886 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13887 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13888 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13889 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13890 szInstr));
13891
13892 if (LogIs3Enabled())
13893 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13894 }
13895 else
13896# endif
13897 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13898 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13899 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13900}
13901#endif /* LOG_ENABLED */
13902
13903
13904/**
13905 * Makes status code adjustments (pass up from I/O and access handlers)
13906 * and maintains statistics.
13907 *
13908 * @returns Strict VBox status code to pass up.
13909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13910 * @param rcStrict The status from executing an instruction.
13911 */
13912DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13913{
13914 if (rcStrict != VINF_SUCCESS)
13915 {
13916 if (RT_SUCCESS(rcStrict))
13917 {
13918 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13919 || rcStrict == VINF_IOM_R3_IOPORT_READ
13920 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13921 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13922 || rcStrict == VINF_IOM_R3_MMIO_READ
13923 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13924 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13925 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13926 || rcStrict == VINF_CPUM_R3_MSR_READ
13927 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13928 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13929 || rcStrict == VINF_EM_RAW_TO_R3
13930 || rcStrict == VINF_EM_TRIPLE_FAULT
13931 || rcStrict == VINF_GIM_R3_HYPERCALL
13932 /* raw-mode / virt handlers only: */
13933 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13934 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13935 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13936 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13937 || rcStrict == VINF_SELM_SYNC_GDT
13938 || rcStrict == VINF_CSAM_PENDING_ACTION
13939 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13940 /* nested hw.virt codes: */
13941 || rcStrict == VINF_VMX_VMEXIT
13942 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13943 || rcStrict == VINF_SVM_VMEXIT
13944 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13945/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13946 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13948 if ( rcStrict == VINF_VMX_VMEXIT
13949 && rcPassUp == VINF_SUCCESS)
13950 rcStrict = VINF_SUCCESS;
13951 else
13952#endif
13953#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13954 if ( rcStrict == VINF_SVM_VMEXIT
13955 && rcPassUp == VINF_SUCCESS)
13956 rcStrict = VINF_SUCCESS;
13957 else
13958#endif
13959 if (rcPassUp == VINF_SUCCESS)
13960 pVCpu->iem.s.cRetInfStatuses++;
13961 else if ( rcPassUp < VINF_EM_FIRST
13962 || rcPassUp > VINF_EM_LAST
13963 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13964 {
13965 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13966 pVCpu->iem.s.cRetPassUpStatus++;
13967 rcStrict = rcPassUp;
13968 }
13969 else
13970 {
13971 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13972 pVCpu->iem.s.cRetInfStatuses++;
13973 }
13974 }
13975 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13976 pVCpu->iem.s.cRetAspectNotImplemented++;
13977 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13978 pVCpu->iem.s.cRetInstrNotImplemented++;
13979 else
13980 pVCpu->iem.s.cRetErrStatuses++;
13981 }
13982 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13983 {
13984 pVCpu->iem.s.cRetPassUpStatus++;
13985 rcStrict = pVCpu->iem.s.rcPassUp;
13986 }
13987
13988 return rcStrict;
13989}
13990
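/*
 * A simplified sketch of the pass-up rule implemented above (iemDemoPickStatus
 * is a hypothetical helper that exists nowhere in the code; the VMX/SVM special
 * cases and the statistics counters are omitted): a recorded rcPassUp replaces
 * rcStrict when it lies outside the VINF_EM_FIRST..VINF_EM_LAST window or is
 * numerically lower (i.e. higher priority), otherwise the original status wins.
 *
 *      static int iemDemoPickStatus(int rcStrict, int rcPassUp)
 *      {
 *          if (rcPassUp == VINF_SUCCESS)
 *              return rcStrict;
 *          if (   rcPassUp < VINF_EM_FIRST
 *              || rcPassUp > VINF_EM_LAST
 *              || rcPassUp < rcStrict)
 *              return rcPassUp;
 *          return rcStrict;
 *      }
 */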
13991
13992/**
13993 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13994 * IEMExecOneWithPrefetchedByPC.
13995 *
13996 * Similar code is found in IEMExecLots.
13997 *
13998 * @return Strict VBox status code.
13999 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14000 * @param fExecuteInhibit If set, execute the instruction following CLI,
14001 * POP SS and MOV SS,GR.
14002 * @param pszFunction The calling function name.
14003 */
14004DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14005{
14006 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14007 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14008 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14009 RT_NOREF_PV(pszFunction);
14010
14011#ifdef IEM_WITH_SETJMP
14012 VBOXSTRICTRC rcStrict;
14013 jmp_buf JmpBuf;
14014 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14015 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14016 if ((rcStrict = setjmp(JmpBuf)) == 0)
14017 {
14018 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14019 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14020 }
14021 else
14022 pVCpu->iem.s.cLongJumps++;
14023 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14024#else
14025 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14026 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14027#endif
14028 if (rcStrict == VINF_SUCCESS)
14029 pVCpu->iem.s.cInstructions++;
14030 if (pVCpu->iem.s.cActiveMappings > 0)
14031 {
14032 Assert(rcStrict != VINF_SUCCESS);
14033 iemMemRollback(pVCpu);
14034 }
14035 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14036 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14037 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14038
14039//#ifdef DEBUG
14040// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14041//#endif
14042
14043#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14044 /*
14045 * Perform any VMX nested-guest instruction boundary actions.
14046 *
14047 * If any of these causes a VM-exit, we must skip executing the next
14048 * instruction (would run into stale page tables). A VM-exit makes sure
14049     * there is no interrupt-inhibition, so that should ensure we don't go on
14050     * to execute the next instruction. Clearing fExecuteInhibit is
14051 * problematic because of the setjmp/longjmp clobbering above.
14052 */
14053 if ( rcStrict == VINF_SUCCESS
14054 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14055 {
14056 /* TPR-below threshold/APIC write has the highest priority. */
14057 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14058 {
14059 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14060 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14061 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14062 }
14063 /* MTF takes priority over VMX-preemption timer. */
14064 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14065 {
14066 rcStrict = iemVmxVmexitMtf(pVCpu);
14067 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14068 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14069 }
14070 /* VMX preemption timer takes priority over NMI-window exits. */
14071 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14072 {
14073 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14074 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14075 rcStrict = VINF_SUCCESS;
14076 else
14077 {
14078 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14079 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14080 }
14081 }
14082 /* NMI-window VM-exit. */
14083 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))
14084 {
14085 rcStrict = iemVmxVmexitNmiWindow(pVCpu);
14086 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14087 }
14088 }
14089#endif
14090
14091 /* Execute the next instruction as well if a cli, pop ss or
14092 mov ss, Gr has just completed successfully. */
14093 if ( fExecuteInhibit
14094 && rcStrict == VINF_SUCCESS
14095 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14096 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14097 {
14098 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14099 if (rcStrict == VINF_SUCCESS)
14100 {
14101#ifdef LOG_ENABLED
14102 iemLogCurInstr(pVCpu, false, pszFunction);
14103#endif
14104#ifdef IEM_WITH_SETJMP
14105 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14106 if ((rcStrict = setjmp(JmpBuf)) == 0)
14107 {
14108 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14109 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14110 }
14111 else
14112 pVCpu->iem.s.cLongJumps++;
14113 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14114#else
14115 IEM_OPCODE_GET_NEXT_U8(&b);
14116 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14117#endif
14118 if (rcStrict == VINF_SUCCESS)
14119 pVCpu->iem.s.cInstructions++;
14120 if (pVCpu->iem.s.cActiveMappings > 0)
14121 {
14122 Assert(rcStrict != VINF_SUCCESS);
14123 iemMemRollback(pVCpu);
14124 }
14125 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14126 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14127 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14128 }
14129 else if (pVCpu->iem.s.cActiveMappings > 0)
14130 iemMemRollback(pVCpu);
14131 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14132 }
14133
14134 /*
14135 * Return value fiddling, statistics and sanity assertions.
14136 */
14137 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14138
14139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14141 return rcStrict;
14142}
14143
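/*
 * Illustrative example of the fExecuteInhibit handling above (the guest code
 * is an assumption): a sequence like
 *
 *      mov ss, ax
 *      mov esp, 0x1000
 *
 * is handled in a single call.  Once the MOV SS completes with the interrupt
 * inhibition flag set, iemExecOneInner decodes and executes the following
 * MOV ESP as well, so interrupt delivery cannot split the stack switch.
 */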
14144
14145#ifdef IN_RC
14146/**
14147 * Re-enters raw-mode or ensure we return to ring-3.
14148 *
14149 * @returns rcStrict, maybe modified.
14150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14151 * @param   rcStrict    The status code returned by the interpreter.
14152 */
14153DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14154{
14155 if ( !pVCpu->iem.s.fInPatchCode
14156 && ( rcStrict == VINF_SUCCESS
14157 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14158 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14159 {
14160 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14161 CPUMRawEnter(pVCpu);
14162 else
14163 {
14164 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14165 rcStrict = VINF_EM_RESCHEDULE;
14166 }
14167 }
14168 return rcStrict;
14169}
14170#endif
14171
14172
14173/**
14174 * Execute one instruction.
14175 *
14176 * @return Strict VBox status code.
14177 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14178 */
14179VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14180{
14181#ifdef LOG_ENABLED
14182 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14183#endif
14184
14185 /*
14186 * Do the decoding and emulation.
14187 */
14188 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14189 if (rcStrict == VINF_SUCCESS)
14190 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14191 else if (pVCpu->iem.s.cActiveMappings > 0)
14192 iemMemRollback(pVCpu);
14193
14194#ifdef IN_RC
14195 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14196#endif
14197 if (rcStrict != VINF_SUCCESS)
14198 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14199 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14200 return rcStrict;
14201}
14202
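/*
 * A minimal usage sketch (hypothetical caller loop, not taken from EM):
 * emulate a handful of instructions and stop on the first status that needs
 * further handling.
 *
 *      for (unsigned i = 0; i < 16; i++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 */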
14203
14204VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14205{
14206 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14207
14208 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14209 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14210 if (rcStrict == VINF_SUCCESS)
14211 {
14212 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14213 if (pcbWritten)
14214 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14215 }
14216 else if (pVCpu->iem.s.cActiveMappings > 0)
14217 iemMemRollback(pVCpu);
14218
14219#ifdef IN_RC
14220 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14221#endif
14222 return rcStrict;
14223}
14224
14225
14226VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14227 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14228{
14229 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14230
14231 VBOXSTRICTRC rcStrict;
14232 if ( cbOpcodeBytes
14233 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14234 {
14235 iemInitDecoder(pVCpu, false);
14236#ifdef IEM_WITH_CODE_TLB
14237 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14238 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14239 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14240 pVCpu->iem.s.offCurInstrStart = 0;
14241 pVCpu->iem.s.offInstrNextByte = 0;
14242#else
14243 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14244 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14245#endif
14246 rcStrict = VINF_SUCCESS;
14247 }
14248 else
14249 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14250 if (rcStrict == VINF_SUCCESS)
14251 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14252 else if (pVCpu->iem.s.cActiveMappings > 0)
14253 iemMemRollback(pVCpu);
14254
14255#ifdef IN_RC
14256 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14257#endif
14258 return rcStrict;
14259}
14260
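/*
 * A minimal usage sketch (hypothetical values; the single byte below encodes
 * a NOP): when the opcode bytes at the current RIP are already available, the
 * prefetch can be skipped by handing them in directly.  The bytes are only
 * used if OpcodeBytesPC matches the current RIP; otherwise a normal prefetch
 * is performed.
 *
 *      static uint8_t const s_abNop[] = { 0x90 };
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                           pVCpu->cpum.GstCtx.rip, s_abNop, sizeof(s_abNop));
 */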
14261
14262VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14263{
14264 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14265
14266 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14267 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14268 if (rcStrict == VINF_SUCCESS)
14269 {
14270 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14271 if (pcbWritten)
14272 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14273 }
14274 else if (pVCpu->iem.s.cActiveMappings > 0)
14275 iemMemRollback(pVCpu);
14276
14277#ifdef IN_RC
14278 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14279#endif
14280 return rcStrict;
14281}
14282
14283
14284VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14285 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14286{
14287 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14288
14289 VBOXSTRICTRC rcStrict;
14290 if ( cbOpcodeBytes
14291 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14292 {
14293 iemInitDecoder(pVCpu, true);
14294#ifdef IEM_WITH_CODE_TLB
14295 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14296 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14297 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14298 pVCpu->iem.s.offCurInstrStart = 0;
14299 pVCpu->iem.s.offInstrNextByte = 0;
14300#else
14301 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14302 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14303#endif
14304 rcStrict = VINF_SUCCESS;
14305 }
14306 else
14307 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14308 if (rcStrict == VINF_SUCCESS)
14309 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14310 else if (pVCpu->iem.s.cActiveMappings > 0)
14311 iemMemRollback(pVCpu);
14312
14313#ifdef IN_RC
14314 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14315#endif
14316 return rcStrict;
14317}
14318
14319
14320/**
14321 * For debugging DISGetParamSize, may come in handy.
14322 *
14323 * @returns Strict VBox status code.
14324 * @param pVCpu The cross context virtual CPU structure of the
14325 * calling EMT.
14326 * @param pCtxCore The context core structure.
14327 * @param OpcodeBytesPC The PC of the opcode bytes.
14328 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14329 * @param cbOpcodeBytes Number of prefetched bytes.
14330 * @param pcbWritten Where to return the number of bytes written.
14331 * Optional.
14332 */
14333VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14334 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14335 uint32_t *pcbWritten)
14336{
14337 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14338
14339 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14340 VBOXSTRICTRC rcStrict;
14341 if ( cbOpcodeBytes
14342 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14343 {
14344 iemInitDecoder(pVCpu, true);
14345#ifdef IEM_WITH_CODE_TLB
14346 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14347 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14348 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14349 pVCpu->iem.s.offCurInstrStart = 0;
14350 pVCpu->iem.s.offInstrNextByte = 0;
14351#else
14352 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14353 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14354#endif
14355 rcStrict = VINF_SUCCESS;
14356 }
14357 else
14358 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14359 if (rcStrict == VINF_SUCCESS)
14360 {
14361 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14362 if (pcbWritten)
14363 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14364 }
14365 else if (pVCpu->iem.s.cActiveMappings > 0)
14366 iemMemRollback(pVCpu);
14367
14368#ifdef IN_RC
14369 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14370#endif
14371 return rcStrict;
14372}
14373
14374
14375VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14376{
14377 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14378 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14379
14380 /*
14381 * See if there is an interrupt pending in TRPM, inject it if we can.
14382 */
14383 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14384#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14385 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14386 if (fIntrEnabled)
14387 {
14388 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14389 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14390 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14391 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14392 else
14393 {
14394 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14395 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14396 }
14397 }
14398#else
14399 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14400#endif
14401 if ( fIntrEnabled
14402 && TRPMHasTrap(pVCpu)
14403 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14404 {
14405 uint8_t u8TrapNo;
14406 TRPMEVENT enmType;
14407 RTGCUINT uErrCode;
14408 RTGCPTR uCr2;
14409 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14410 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14411 TRPMResetTrap(pVCpu);
14412#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14413 /* Injecting an event may cause a VM-exit. */
14414 if ( rcStrict != VINF_SUCCESS
14415 && rcStrict != VINF_IEM_RAISED_XCPT)
14416 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14417#else
14418 NOREF(rcStrict);
14419#endif
14420 }
14421
14422 /*
14423      * Initial decoder init w/ prefetch, then set up setjmp.
14424 */
14425 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14426 if (rcStrict == VINF_SUCCESS)
14427 {
14428#ifdef IEM_WITH_SETJMP
14429 jmp_buf JmpBuf;
14430 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14431 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14432 pVCpu->iem.s.cActiveMappings = 0;
14433 if ((rcStrict = setjmp(JmpBuf)) == 0)
14434#endif
14435 {
14436 /*
14437             * The run loop. We limit ourselves to the caller-specified maximum instruction count.
14438 */
14439 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14440 PVM pVM = pVCpu->CTX_SUFF(pVM);
14441 for (;;)
14442 {
14443 /*
14444 * Log the state.
14445 */
14446#ifdef LOG_ENABLED
14447 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14448#endif
14449
14450 /*
14451 * Do the decoding and emulation.
14452 */
14453 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14454 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14455 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14456 {
14457 Assert(pVCpu->iem.s.cActiveMappings == 0);
14458 pVCpu->iem.s.cInstructions++;
14459 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14460 {
14461 uint64_t fCpu = pVCpu->fLocalForcedActions
14462 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14463 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14464 | VMCPU_FF_TLB_FLUSH
14465#ifdef VBOX_WITH_RAW_MODE
14466 | VMCPU_FF_TRPM_SYNC_IDT
14467 | VMCPU_FF_SELM_SYNC_TSS
14468 | VMCPU_FF_SELM_SYNC_GDT
14469 | VMCPU_FF_SELM_SYNC_LDT
14470#endif
14471 | VMCPU_FF_INHIBIT_INTERRUPTS
14472 | VMCPU_FF_BLOCK_NMIS
14473 | VMCPU_FF_UNHALT ));
14474
14475 if (RT_LIKELY( ( !fCpu
14476 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14477 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14478 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14479 {
14480 if (cMaxInstructionsGccStupidity-- > 0)
14481 {
14482                                /* Poll timers every now and then according to the caller's specs. */
14483 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14484 || !TMTimerPollBool(pVM, pVCpu))
14485 {
14486 Assert(pVCpu->iem.s.cActiveMappings == 0);
14487 iemReInitDecoder(pVCpu);
14488 continue;
14489 }
14490 }
14491 }
14492 }
14493 Assert(pVCpu->iem.s.cActiveMappings == 0);
14494 }
14495 else if (pVCpu->iem.s.cActiveMappings > 0)
14496 iemMemRollback(pVCpu);
14497 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14498 break;
14499 }
14500 }
14501#ifdef IEM_WITH_SETJMP
14502 else
14503 {
14504 if (pVCpu->iem.s.cActiveMappings > 0)
14505 iemMemRollback(pVCpu);
14506# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14507 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14508# endif
14509 pVCpu->iem.s.cLongJumps++;
14510 }
14511 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14512#endif
14513
14514 /*
14515 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14516 */
14517 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14518 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14519 }
14520 else
14521 {
14522 if (pVCpu->iem.s.cActiveMappings > 0)
14523 iemMemRollback(pVCpu);
14524
14525#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14526 /*
14527 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14528 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14529 */
14530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14531#endif
14532 }
14533
14534 /*
14535 * Maybe re-enter raw-mode and log.
14536 */
14537#ifdef IN_RC
14538 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14539#endif
14540 if (rcStrict != VINF_SUCCESS)
14541 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14542 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14543 if (pcInstructions)
14544 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14545 return rcStrict;
14546}
14547
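/*
 * A minimal usage sketch (hypothetical parameters, not taken from EM): note
 * that cPollRate + 1 must be a power of two (see the assertion at the top of
 * the function), so timers get polled roughly every cPollRate + 1 executed
 * instructions.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
 */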
14548
14549/**
14550 * Interface used by EMExecuteExec, does exit statistics and limits.
14551 *
14552 * @returns Strict VBox status code.
14553 * @param pVCpu The cross context virtual CPU structure.
14554 * @param fWillExit To be defined.
14555 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14556 * @param cMaxInstructions Maximum number of instructions to execute.
14557 * @param cMaxInstructionsWithoutExits
14558 * The max number of instructions without exits.
14559 * @param pStats Where to return statistics.
14560 */
14561VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14562 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14563{
14564 NOREF(fWillExit); /** @todo define flexible exit crits */
14565
14566 /*
14567 * Initialize return stats.
14568 */
14569 pStats->cInstructions = 0;
14570 pStats->cExits = 0;
14571 pStats->cMaxExitDistance = 0;
14572 pStats->cReserved = 0;
14573
14574 /*
14575      * Initial decoder init w/ prefetch, then set up setjmp.
14576 */
14577 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14578 if (rcStrict == VINF_SUCCESS)
14579 {
14580#ifdef IEM_WITH_SETJMP
14581 jmp_buf JmpBuf;
14582 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14583 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14584 pVCpu->iem.s.cActiveMappings = 0;
14585 if ((rcStrict = setjmp(JmpBuf)) == 0)
14586#endif
14587 {
14588#ifdef IN_RING0
14589 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14590#endif
14591 uint32_t cInstructionSinceLastExit = 0;
14592
14593 /*
14594             * The run loop. We limit ourselves to the caller-specified maximum instruction count.
14595 */
14596 PVM pVM = pVCpu->CTX_SUFF(pVM);
14597 for (;;)
14598 {
14599 /*
14600 * Log the state.
14601 */
14602#ifdef LOG_ENABLED
14603 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14604#endif
14605
14606 /*
14607 * Do the decoding and emulation.
14608 */
14609 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14610
14611 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14612 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14613
14614 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14615 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14616 {
14617 pStats->cExits += 1;
14618 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14619 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14620 cInstructionSinceLastExit = 0;
14621 }
14622
14623 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14624 {
14625 Assert(pVCpu->iem.s.cActiveMappings == 0);
14626 pVCpu->iem.s.cInstructions++;
14627 pStats->cInstructions++;
14628 cInstructionSinceLastExit++;
14629 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14630 {
14631 uint64_t fCpu = pVCpu->fLocalForcedActions
14632 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14633 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14634 | VMCPU_FF_TLB_FLUSH
14635#ifdef VBOX_WITH_RAW_MODE
14636 | VMCPU_FF_TRPM_SYNC_IDT
14637 | VMCPU_FF_SELM_SYNC_TSS
14638 | VMCPU_FF_SELM_SYNC_GDT
14639 | VMCPU_FF_SELM_SYNC_LDT
14640#endif
14641 | VMCPU_FF_INHIBIT_INTERRUPTS
14642 | VMCPU_FF_BLOCK_NMIS
14643 | VMCPU_FF_UNHALT ));
14644
14645 if (RT_LIKELY( ( ( !fCpu
14646 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14647 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14648 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14649 || pStats->cInstructions < cMinInstructions))
14650 {
14651 if (pStats->cInstructions < cMaxInstructions)
14652 {
14653 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14654 {
14655#ifdef IN_RING0
14656 if ( !fCheckPreemptionPending
14657 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14658#endif
14659 {
14660 Assert(pVCpu->iem.s.cActiveMappings == 0);
14661 iemReInitDecoder(pVCpu);
14662 continue;
14663 }
14664#ifdef IN_RING0
14665 rcStrict = VINF_EM_RAW_INTERRUPT;
14666 break;
14667#endif
14668 }
14669 }
14670 }
14671 Assert(!(fCpu & VMCPU_FF_IEM));
14672 }
14673 Assert(pVCpu->iem.s.cActiveMappings == 0);
14674 }
14675 else if (pVCpu->iem.s.cActiveMappings > 0)
14676 iemMemRollback(pVCpu);
14677 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14678 break;
14679 }
14680 }
14681#ifdef IEM_WITH_SETJMP
14682 else
14683 {
14684 if (pVCpu->iem.s.cActiveMappings > 0)
14685 iemMemRollback(pVCpu);
14686 pVCpu->iem.s.cLongJumps++;
14687 }
14688 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14689#endif
14690
14691 /*
14692 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14693 */
14694 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14695 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14696 }
14697 else
14698 {
14699 if (pVCpu->iem.s.cActiveMappings > 0)
14700 iemMemRollback(pVCpu);
14701
14702#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14703 /*
14704 * When a nested-guest triggers an exception intercept (e.g. #PF) while fetching
14705 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14706 */
14707 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14708#endif
14709 }
14710
14711 /*
14712 * Maybe re-enter raw-mode and log.
14713 */
14714#ifdef IN_RC
14715 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14716#endif
14717 if (rcStrict != VINF_SUCCESS)
14718 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14719 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14720 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14721 return rcStrict;
14722}
14723
14724
14725/**
14726 * Injects a trap, fault, abort, software interrupt or external interrupt.
14727 *
14728 * The parameter list matches TRPMQueryTrapAll pretty closely.
14729 *
14730 * @returns Strict VBox status code.
14731 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14732 * @param u8TrapNo The trap number.
14733 * @param enmType What type is it (trap/fault/abort), software
14734 * interrupt or hardware interrupt.
14735 * @param uErrCode The error code if applicable.
14736 * @param uCr2 The CR2 value if applicable.
14737 * @param cbInstr The instruction length (only relevant for
14738 * software interrupts).
14739 */
14740VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14741 uint8_t cbInstr)
14742{
14743 iemInitDecoder(pVCpu, false);
14744#ifdef DBGFTRACE_ENABLED
14745 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14746 u8TrapNo, enmType, uErrCode, uCr2);
14747#endif
14748
14749 uint32_t fFlags;
14750 switch (enmType)
14751 {
14752 case TRPM_HARDWARE_INT:
14753 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14754 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14755 uErrCode = uCr2 = 0;
14756 break;
14757
14758 case TRPM_SOFTWARE_INT:
14759 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14760 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14761 uErrCode = uCr2 = 0;
14762 break;
14763
14764 case TRPM_TRAP:
14765 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14766 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14767 if (u8TrapNo == X86_XCPT_PF)
14768 fFlags |= IEM_XCPT_FLAGS_CR2;
14769 switch (u8TrapNo)
14770 {
14771 case X86_XCPT_DF:
14772 case X86_XCPT_TS:
14773 case X86_XCPT_NP:
14774 case X86_XCPT_SS:
14775 case X86_XCPT_PF:
14776 case X86_XCPT_AC:
14777 fFlags |= IEM_XCPT_FLAGS_ERR;
14778 break;
14779 }
14780 break;
14781
14782 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14783 }
14784
14785 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14786
14787 if (pVCpu->iem.s.cActiveMappings > 0)
14788 iemMemRollback(pVCpu);
14789
14790 return rcStrict;
14791}
14792
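/* Minimal usage sketch (hypothetical helper and call site, illustration only):
 * injecting a #PF directly when the caller already has the error code and
 * fault address at hand.  The error code and CR2 arguments are only consumed
 * for TRPM_TRAP events (CR2 only for #PF), and cbInstr only matters for
 * software interrupts.  The real TRPM-driven path is IEMInjectTrpmEvent() below. */
#if 0 /* not compiled, example only */
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPU pVCpu, uint16_t uErrCode, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif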
14793
14794/**
14795 * Injects the active TRPM event.
14796 *
14797 * @returns Strict VBox status code.
14798 * @param pVCpu The cross context virtual CPU structure.
14799 */
14800VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14801{
14802#ifndef IEM_IMPLEMENTS_TASKSWITCH
14803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14804#else
14805 uint8_t u8TrapNo;
14806 TRPMEVENT enmType;
14807 RTGCUINT uErrCode;
14808 RTGCUINTPTR uCr2;
14809 uint8_t cbInstr;
14810 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14811 if (RT_FAILURE(rc))
14812 return rc;
14813
14814 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14815#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14816 if (rcStrict == VINF_SVM_VMEXIT)
14817 rcStrict = VINF_SUCCESS;
14818#endif
14819#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14820 if (rcStrict == VINF_VMX_VMEXIT)
14821 rcStrict = VINF_SUCCESS;
14822#endif
14823 /** @todo Are there any other codes that imply the event was successfully
14824 * delivered to the guest? See @bugref{6607}. */
14825 if ( rcStrict == VINF_SUCCESS
14826 || rcStrict == VINF_IEM_RAISED_XCPT)
14827 TRPMResetTrap(pVCpu);
14828
14829 return rcStrict;
14830#endif
14831}
14832
14833
14834VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14835{
14836 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14837 return VERR_NOT_IMPLEMENTED;
14838}
14839
14840
14841VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14842{
14843 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14844 return VERR_NOT_IMPLEMENTED;
14845}
14846
14847
14848#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14849/**
14850 * Executes an IRET instruction with the default operand size.
14851 *
14852 * This is for PATM.
14853 *
14854 * @returns VBox status code.
14855 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14856 * @param pCtxCore The register frame.
14857 */
14858VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14859{
14860 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14861
14862 iemCtxCoreToCtx(pCtx, pCtxCore);
14863 iemInitDecoder(pVCpu);
14864 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14865 if (rcStrict == VINF_SUCCESS)
14866 iemCtxToCtxCore(pCtxCore, pCtx);
14867 else
14868 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14869 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14870 return rcStrict;
14871}
14872#endif
14873
14874
14875/**
14876 * Macro used by the IEMExec* methods to check the given instruction length.
14877 *
14878 * Will return on failure!
14879 *
14880 * @param a_cbInstr The given instruction length.
14881 * @param a_cbMin The minimum length.
14882 */
14883#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14884 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14885 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14886
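/* Note: the single unsigned comparison in the macro above relies on wrap-around:
 * when a_cbInstr is smaller than a_cbMin the subtraction wraps to a huge value
 * and the test fails, so it is equivalent to the two-sided range check sketched
 * below (hypothetical helper, illustration only). */
#if 0 /* not compiled, example only */
DECLINLINE(bool) iemExecIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    /* The length must be at least cbMin and no more than 15, the architectural maximum. */
    return cbInstr >= cbMin && cbInstr <= 15;
}
#endif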
14887
14888/**
14889 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14890 *
14891 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14892 *
14893 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
14894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14895 * @param rcStrict The status code to fiddle.
14896 */
14897DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14898{
14899 iemUninitExec(pVCpu);
14900#ifdef IN_RC
14901 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14902#else
14903 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14904#endif
14905}
14906
14907
14908/**
14909 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14910 *
14911 * This API ASSUMES that the caller has already verified that the guest code is
14912 * allowed to access the I/O port. (The I/O port is in the DX register in the
14913 * guest state.)
14914 *
14915 * @returns Strict VBox status code.
14916 * @param pVCpu The cross context virtual CPU structure.
14917 * @param cbValue The size of the I/O port access (1, 2, or 4).
14918 * @param enmAddrMode The addressing mode.
14919 * @param fRepPrefix Indicates whether a repeat prefix is used
14920 * (doesn't matter which for this instruction).
14921 * @param cbInstr The instruction length in bytes.
14922 * @param iEffSeg The effective segment register number.
14923 * @param fIoChecked Whether the access to the I/O port has been
14924 * checked or not. It's typically checked in the
14925 * HM scenario.
14926 */
14927VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14928 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14929{
14930 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14931 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14932
14933 /*
14934 * State init.
14935 */
14936 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14937
14938 /*
14939 * Switch orgy for getting to the right handler.
14940 */
14941 VBOXSTRICTRC rcStrict;
14942 if (fRepPrefix)
14943 {
14944 switch (enmAddrMode)
14945 {
14946 case IEMMODE_16BIT:
14947 switch (cbValue)
14948 {
14949 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14950 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14951 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14952 default:
14953 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14954 }
14955 break;
14956
14957 case IEMMODE_32BIT:
14958 switch (cbValue)
14959 {
14960 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14961 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14962 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14963 default:
14964 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14965 }
14966 break;
14967
14968 case IEMMODE_64BIT:
14969 switch (cbValue)
14970 {
14971 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14972 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14973 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 default:
14975 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14976 }
14977 break;
14978
14979 default:
14980 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14981 }
14982 }
14983 else
14984 {
14985 switch (enmAddrMode)
14986 {
14987 case IEMMODE_16BIT:
14988 switch (cbValue)
14989 {
14990 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14991 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14992 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14993 default:
14994 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14995 }
14996 break;
14997
14998 case IEMMODE_32BIT:
14999 switch (cbValue)
15000 {
15001 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15002 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15003 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15004 default:
15005 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15006 }
15007 break;
15008
15009 case IEMMODE_64BIT:
15010 switch (cbValue)
15011 {
15012 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15013 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15014 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15015 default:
15016 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15017 }
15018 break;
15019
15020 default:
15021 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15022 }
15023 }
15024
15025 if (pVCpu->iem.s.cActiveMappings)
15026 iemMemRollback(pVCpu);
15027
15028 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15029}
15030
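/* Usage sketch (hypothetical helper and parameter values, illustration only):
 * forwarding a VM-exit on a "rep outsb" with a 32-bit address size and DS as
 * the effective segment. */
#if 0 /* not compiled, example only */
static VBOXSTRICTRC hmExampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif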
15031
15032/**
15033 * Interface for HM and EM for executing string I/O IN (read) instructions.
15034 *
15035 * This API ASSUMES that the caller has already verified that the guest code is
15036 * allowed to access the I/O port. (The I/O port is in the DX register in the
15037 * guest state.)
15038 *
15039 * @returns Strict VBox status code.
15040 * @param pVCpu The cross context virtual CPU structure.
15041 * @param cbValue The size of the I/O port access (1, 2, or 4).
15042 * @param enmAddrMode The addressing mode.
15043 * @param fRepPrefix Indicates whether a repeat prefix is used
15044 * (doesn't matter which for this instruction).
15045 * @param cbInstr The instruction length in bytes.
15046 * @param fIoChecked Whether the access to the I/O port has been
15047 * checked or not. It's typically checked in the
15048 * HM scenario.
15049 */
15050VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15051 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15052{
15053 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15054
15055 /*
15056 * State init.
15057 */
15058 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15059
15060 /*
15061 * Switch orgy for getting to the right handler.
15062 */
15063 VBOXSTRICTRC rcStrict;
15064 if (fRepPrefix)
15065 {
15066 switch (enmAddrMode)
15067 {
15068 case IEMMODE_16BIT:
15069 switch (cbValue)
15070 {
15071 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15072 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15073 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15074 default:
15075 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15076 }
15077 break;
15078
15079 case IEMMODE_32BIT:
15080 switch (cbValue)
15081 {
15082 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15083 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15084 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15085 default:
15086 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15087 }
15088 break;
15089
15090 case IEMMODE_64BIT:
15091 switch (cbValue)
15092 {
15093 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15094 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15095 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15096 default:
15097 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15098 }
15099 break;
15100
15101 default:
15102 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15103 }
15104 }
15105 else
15106 {
15107 switch (enmAddrMode)
15108 {
15109 case IEMMODE_16BIT:
15110 switch (cbValue)
15111 {
15112 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15113 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15114 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15115 default:
15116 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15117 }
15118 break;
15119
15120 case IEMMODE_32BIT:
15121 switch (cbValue)
15122 {
15123 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15124 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15125 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15126 default:
15127 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15128 }
15129 break;
15130
15131 case IEMMODE_64BIT:
15132 switch (cbValue)
15133 {
15134 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15135 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15136 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15137 default:
15138 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15139 }
15140 break;
15141
15142 default:
15143 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15144 }
15145 }
15146
15147 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15148 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15149}
15150
15151
15152/**
15153 * Interface for raw-mode to execute an OUT instruction.
15154 *
15155 * @returns Strict VBox status code.
15156 * @param pVCpu The cross context virtual CPU structure.
15157 * @param cbInstr The instruction length in bytes.
15158 * @param u16Port The port to write to.
15159 * @param fImm Whether the port is specified using an immediate operand or
15160 * using the implicit DX register.
15161 * @param cbReg The register size.
15162 *
15163 * @remarks In ring-0 not all of the state needs to be synced in.
15164 */
15165VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15166{
15167 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15168 Assert(cbReg <= 4 && cbReg != 3);
15169
15170 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15171 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15172 Assert(!pVCpu->iem.s.cActiveMappings);
15173 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15174}
15175
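/* Usage sketch (hypothetical helper, illustration only): emulating a trapped
 * "out 80h, al", i.e. a 2-byte instruction with an immediate port operand and
 * a byte-sized register. */
#if 0 /* not compiled, example only */
static VBOXSTRICTRC hmExampleEmulatePortWrite(PVMCPU pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif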
15176
15177/**
15178 * Interface for raw-mode to execute an IN instruction.
15179 *
15180 * @returns Strict VBox status code.
15181 * @param pVCpu The cross context virtual CPU structure.
15182 * @param cbInstr The instruction length in bytes.
15183 * @param u16Port The port to read.
15184 * @param fImm Whether the port is specified using an immediate operand or
15185 * using the implicit DX.
15186 * @param cbReg The register size.
15187 */
15188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15189{
15190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15191 Assert(cbReg <= 4 && cbReg != 3);
15192
15193 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15194 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15195 Assert(!pVCpu->iem.s.cActiveMappings);
15196 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15197}
15198
15199
15200/**
15201 * Interface for HM and EM to write to a CRx register.
15202 *
15203 * @returns Strict VBox status code.
15204 * @param pVCpu The cross context virtual CPU structure.
15205 * @param cbInstr The instruction length in bytes.
15206 * @param iCrReg The control register number (destination).
15207 * @param iGReg The general purpose register number (source).
15208 *
15209 * @remarks In ring-0 not all of the state needs to be synced in.
15210 */
15211VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15212{
15213 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15214 Assert(iCrReg < 16);
15215 Assert(iGReg < 16);
15216
15217 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15219 Assert(!pVCpu->iem.s.cActiveMappings);
15220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15221}
15222
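/* Usage sketch (hypothetical helper, illustration only): emulating a trapped
 * "mov cr3, rax", which encodes as 0F 22 D8 and is thus 3 bytes long. */
#if 0 /* not compiled, example only */
static VBOXSTRICTRC hmExampleEmulateMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg (CR3)*/, X86_GREG_xAX /*iGReg*/);
}
#endif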
15223
15224/**
15225 * Interface for HM and EM to read from a CRx register.
15226 *
15227 * @returns Strict VBox status code.
15228 * @param pVCpu The cross context virtual CPU structure.
15229 * @param cbInstr The instruction length in bytes.
15230 * @param iGReg The general purpose register number (destination).
15231 * @param iCrReg The control register number (source).
15232 *
15233 * @remarks In ring-0 not all of the state needs to be synced in.
15234 */
15235VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15236{
15237 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15238 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15239 | CPUMCTX_EXTRN_APIC_TPR);
15240 Assert(iCrReg < 16);
15241 Assert(iGReg < 16);
15242
15243 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15244 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15245 Assert(!pVCpu->iem.s.cActiveMappings);
15246 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15247}
15248
15249
15250/**
15251 * Interface for HM and EM to clear the CR0[TS] bit.
15252 *
15253 * @returns Strict VBox status code.
15254 * @param pVCpu The cross context virtual CPU structure.
15255 * @param cbInstr The instruction length in bytes.
15256 *
15257 * @remarks In ring-0 not all of the state needs to be synced in.
15258 */
15259VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15260{
15261 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15262
15263 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15264 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15265 Assert(!pVCpu->iem.s.cActiveMappings);
15266 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15267}
15268
15269
15270/**
15271 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15272 *
15273 * @returns Strict VBox status code.
15274 * @param pVCpu The cross context virtual CPU structure.
15275 * @param cbInstr The instruction length in bytes.
15276 * @param uValue The value to load into CR0.
15277 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15278 * memory operand. Otherwise pass NIL_RTGCPTR.
15279 *
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15285
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15288 Assert(!pVCpu->iem.s.cActiveMappings);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15295 *
15296 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15297 *
15298 * @returns Strict VBox status code.
15299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15300 * @param cbInstr The instruction length in bytes.
15301 * @remarks In ring-0 not all of the state needs to be synced in.
15302 * @thread EMT(pVCpu)
15303 */
15304VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15305{
15306 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15307
15308 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15309 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15310 Assert(!pVCpu->iem.s.cActiveMappings);
15311 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15312}
15313
15314
15315/**
15316 * Interface for HM and EM to emulate the WBINVD instruction.
15317 *
15318 * @returns Strict VBox status code.
15319 * @param pVCpu The cross context virtual CPU structure.
15320 * @param cbInstr The instruction length in bytes.
15321 *
15322 * @remarks In ring-0 not all of the state needs to be synced in.
15323 */
15324VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15325{
15326 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15327
15328 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15329 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15330 Assert(!pVCpu->iem.s.cActiveMappings);
15331 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15332}
15333
15334
15335/**
15336 * Interface for HM and EM to emulate the INVD instruction.
15337 *
15338 * @returns Strict VBox status code.
15339 * @param pVCpu The cross context virtual CPU structure.
15340 * @param cbInstr The instruction length in bytes.
15341 *
15342 * @remarks In ring-0 not all of the state needs to be synced in.
15343 */
15344VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15345{
15346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15347
15348 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15349 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15350 Assert(!pVCpu->iem.s.cActiveMappings);
15351 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15352}
15353
15354
15355/**
15356 * Interface for HM and EM to emulate the INVLPG instruction.
15357 *
15358 * @returns Strict VBox status code.
15359 * @retval VINF_PGM_SYNC_CR3
15360 *
15361 * @param pVCpu The cross context virtual CPU structure.
15362 * @param cbInstr The instruction length in bytes.
15363 * @param GCPtrPage The effective address of the page to invalidate.
15364 *
15365 * @remarks In ring-0 not all of the state needs to be synced in.
15366 */
15367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15368{
15369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15370
15371 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15372 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15373 Assert(!pVCpu->iem.s.cActiveMappings);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the CPUID instruction.
15380 *
15381 * @returns Strict VBox status code.
15382 *
15383 * @param pVCpu The cross context virtual CPU structure.
15384 * @param cbInstr The instruction length in bytes.
15385 *
15386 * @remarks Not all of the state needs to be synced in: the usual plus RAX and RCX.
15387 */
15388VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15389{
15390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15391 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15392
15393 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15394 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15395 Assert(!pVCpu->iem.s.cActiveMappings);
15396 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15397}
15398
15399
15400/**
15401 * Interface for HM and EM to emulate the RDPMC instruction.
15402 *
15403 * @returns Strict VBox status code.
15404 *
15405 * @param pVCpu The cross context virtual CPU structure.
15406 * @param cbInstr The instruction length in bytes.
15407 *
15408 * @remarks Not all of the state needs to be synced in.
15409 */
15410VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15411{
15412 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15413 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15414
15415 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15416 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15417 Assert(!pVCpu->iem.s.cActiveMappings);
15418 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15419}
15420
15421
15422/**
15423 * Interface for HM and EM to emulate the RDTSC instruction.
15424 *
15425 * @returns Strict VBox status code.
15426 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15427 *
15428 * @param pVCpu The cross context virtual CPU structure.
15429 * @param cbInstr The instruction length in bytes.
15430 *
15431 * @remarks Not all of the state needs to be synced in.
15432 */
15433VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15434{
15435 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15436 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the RDTSCP instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15450 *
15451 * @param pVCpu The cross context virtual CPU structure.
15452 * @param cbInstr The instruction length in bytes.
15453 *
15454 * @remarks Not all of the state needs to be synced in. It is recommended
15455 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15456 */
15457VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15458{
15459 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15460 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15461
15462 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15463 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15464 Assert(!pVCpu->iem.s.cActiveMappings);
15465 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15466}
15467
15468
15469/**
15470 * Interface for HM and EM to emulate the RDMSR instruction.
15471 *
15472 * @returns Strict VBox status code.
15473 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15474 *
15475 * @param pVCpu The cross context virtual CPU structure.
15476 * @param cbInstr The instruction length in bytes.
15477 *
15478 * @remarks Not all of the state needs to be synced in. Requires RCX and
15479 * (currently) all MSRs.
15480 */
15481VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15482{
15483 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15484 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15485
15486 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15487 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15488 Assert(!pVCpu->iem.s.cActiveMappings);
15489 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15490}
15491
15492
15493/**
15494 * Interface for HM and EM to emulate the WRMSR instruction.
15495 *
15496 * @returns Strict VBox status code.
15497 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15498 *
15499 * @param pVCpu The cross context virtual CPU structure.
15500 * @param cbInstr The instruction length in bytes.
15501 *
15502 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15503 * and (currently) all MSRs.
15504 */
15505VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15506{
15507 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15508 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15509 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15510
15511 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15513 Assert(!pVCpu->iem.s.cActiveMappings);
15514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15515}
15516
15517
15518/**
15519 * Interface for HM and EM to emulate the MONITOR instruction.
15520 *
15521 * @returns Strict VBox status code.
15522 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15523 *
15524 * @param pVCpu The cross context virtual CPU structure.
15525 * @param cbInstr The instruction length in bytes.
15526 *
15527 * @remarks Not all of the state needs to be synced in.
15528 * @remarks ASSUMES the default DS segment and that no segment override
15529 * prefixes are used.
15530 */
15531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15532{
15533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15534 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15535
15536 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15537 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15538 Assert(!pVCpu->iem.s.cActiveMappings);
15539 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15540}
15541
15542
15543/**
15544 * Interface for HM and EM to emulate the MWAIT instruction.
15545 *
15546 * @returns Strict VBox status code.
15547 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15548 *
15549 * @param pVCpu The cross context virtual CPU structure.
15550 * @param cbInstr The instruction length in bytes.
15551 *
15552 * @remarks Not all of the state needs to be synced in.
15553 */
15554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15555{
15556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15557
15558 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15559 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15560 Assert(!pVCpu->iem.s.cActiveMappings);
15561 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15562}
15563
15564
15565/**
15566 * Interface for HM and EM to emulate the HLT instruction.
15567 *
15568 * @returns Strict VBox status code.
15569 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15570 *
15571 * @param pVCpu The cross context virtual CPU structure.
15572 * @param cbInstr The instruction length in bytes.
15573 *
15574 * @remarks Not all of the state needs to be synced in.
15575 */
15576VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15577{
15578 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15579
15580 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15581 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15582 Assert(!pVCpu->iem.s.cActiveMappings);
15583 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15584}
15585
15586
15587/**
15588 * Checks if IEM is in the process of delivering an event (interrupt or
15589 * exception).
15590 *
15591 * @returns true if we're in the process of raising an interrupt or exception,
15592 * false otherwise.
15593 * @param pVCpu The cross context virtual CPU structure.
15594 * @param puVector Where to store the vector associated with the
15595 * currently delivered event, optional.
15596 * @param pfFlags Where to store the event delivery flags (see
15597 * IEM_XCPT_FLAGS_XXX), optional.
15598 * @param puErr Where to store the error code associated with the
15599 * event, optional.
15600 * @param puCr2 Where to store the CR2 associated with the event,
15601 * optional.
15602 * @remarks The caller should check the flags to determine if the error code and
15603 * CR2 are valid for the event.
15604 */
15605VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15606{
15607 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15608 if (fRaisingXcpt)
15609 {
15610 if (puVector)
15611 *puVector = pVCpu->iem.s.uCurXcpt;
15612 if (pfFlags)
15613 *pfFlags = pVCpu->iem.s.fCurXcpt;
15614 if (puErr)
15615 *puErr = pVCpu->iem.s.uCurXcptErr;
15616 if (puCr2)
15617 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15618 }
15619 return fRaisingXcpt;
15620}
15621
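/* Usage sketch (hypothetical helper, illustration only): the error code and CR2
 * outputs are only meaningful when the corresponding IEM_XCPT_FLAGS_XXX bits
 * are set in the returned flags. */
#if 0 /* not compiled, example only */
static void iemExampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  err=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  cr2=%#RX64\n", uCr2));
    }
}
#endif
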
15622#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15623
15624/**
15625 * Interface for HM and EM to emulate the CLGI instruction.
15626 *
15627 * @returns Strict VBox status code.
15628 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15629 * @param cbInstr The instruction length in bytes.
15630 * @thread EMT(pVCpu)
15631 */
15632VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15633{
15634 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15635
15636 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15637 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15638 Assert(!pVCpu->iem.s.cActiveMappings);
15639 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15640}
15641
15642
15643/**
15644 * Interface for HM and EM to emulate the STGI instruction.
15645 *
15646 * @returns Strict VBox status code.
15647 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15648 * @param cbInstr The instruction length in bytes.
15649 * @thread EMT(pVCpu)
15650 */
15651VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15652{
15653 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15654
15655 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15656 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15657 Assert(!pVCpu->iem.s.cActiveMappings);
15658 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15659}
15660
15661
15662/**
15663 * Interface for HM and EM to emulate the VMLOAD instruction.
15664 *
15665 * @returns Strict VBox status code.
15666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15667 * @param cbInstr The instruction length in bytes.
15668 * @thread EMT(pVCpu)
15669 */
15670VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15671{
15672 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15673
15674 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15675 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15676 Assert(!pVCpu->iem.s.cActiveMappings);
15677 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15678}
15679
15680
15681/**
15682 * Interface for HM and EM to emulate the VMSAVE instruction.
15683 *
15684 * @returns Strict VBox status code.
15685 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15686 * @param cbInstr The instruction length in bytes.
15687 * @thread EMT(pVCpu)
15688 */
15689VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15690{
15691 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15692
15693 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15694 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15695 Assert(!pVCpu->iem.s.cActiveMappings);
15696 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15697}
15698
15699
15700/**
15701 * Interface for HM and EM to emulate the INVLPGA instruction.
15702 *
15703 * @returns Strict VBox status code.
15704 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15705 * @param cbInstr The instruction length in bytes.
15706 * @thread EMT(pVCpu)
15707 */
15708VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15709{
15710 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15711
15712 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15713 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15714 Assert(!pVCpu->iem.s.cActiveMappings);
15715 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15716}
15717
15718
15719/**
15720 * Interface for HM and EM to emulate the VMRUN instruction.
15721 *
15722 * @returns Strict VBox status code.
15723 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15724 * @param cbInstr The instruction length in bytes.
15725 * @thread EMT(pVCpu)
15726 */
15727VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15728{
15729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15730 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15731
15732 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15733 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15734 Assert(!pVCpu->iem.s.cActiveMappings);
15735 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15736}
15737
15738
15739/**
15740 * Interface for HM and EM to emulate \#VMEXIT.
15741 *
15742 * @returns Strict VBox status code.
15743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15744 * @param uExitCode The exit code.
15745 * @param uExitInfo1 The exit info. 1 field.
15746 * @param uExitInfo2 The exit info. 2 field.
15747 * @thread EMT(pVCpu)
15748 */
15749VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15750{
15751 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15752 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15753 if (pVCpu->iem.s.cActiveMappings)
15754 iemMemRollback(pVCpu);
15755 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15756}
15757
15758#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15759
15760#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15761
15762/**
15763 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15764 *
15765 * @returns Strict VBox status code.
15766 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15767 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15768 * the x2APIC device.
15769 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15770 *
15771 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15772 * @param idMsr The MSR being read.
15773 * @param pu64Value Pointer to the value being written or where to store the
15774 * value being read.
15775 * @param fWrite Whether this is an MSR write or read access.
15776 * @thread EMT(pVCpu)
15777 */
15778VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15779{
15780 Assert(pu64Value);
15781
15782 VBOXSTRICTRC rcStrict;
15783 if (!fWrite)
15784 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15785 else
15786 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15787 if (pVCpu->iem.s.cActiveMappings)
15788 iemMemRollback(pVCpu);
15789 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15790
15791}
15792
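/* Usage sketch (hypothetical helper, illustration only): virtualizing a read of
 * an x2APIC MSR and mapping the virtualization-specific status codes back to a
 * plain result for the caller. */
#if 0 /* not compiled, example only */
static VBOXSTRICTRC hmExampleVirtX2ApicMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;    /* Virtualized: *pu64Value now holds the value. */
    return rcStrict;            /* VINF_VMX_INTERCEPT_NOT_ACTIVE or an error: the caller decides. */
}
#endif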
15793
15794/**
15795 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15796 *
15797 * @returns Strict VBox status code.
15798 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15799 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15800 *
15801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15802 * @param offAccess The offset of the register being accessed (within the
15803 * APIC-access page).
15804 * @param cbAccess The size of the access in bytes.
15805 * @param pvData Pointer to the data being written or where to store the data
15806 * being read.
15807 * @param fWrite Whether this is a write or read access.
15808 * @thread EMT(pVCpu)
15809 */
15810VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15811 bool fWrite)
15812{
15813 Assert(pvData);
15814
15815 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15816 * accesses, so we only use read/write here. Maybe in the future the PGM
15817 * physical handler will be extended to include this information? */
15818 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15819 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15820 if (pVCpu->iem.s.cActiveMappings)
15821 iemMemRollback(pVCpu);
15822 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15823}
15824
15825
15826/**
15827 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15828 * VM-exit.
15829 *
15830 * @returns Strict VBox status code.
15831 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15832 * @thread EMT(pVCpu)
15833 */
15834VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15835{
15836 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15837 if (pVCpu->iem.s.cActiveMappings)
15838 iemMemRollback(pVCpu);
15839 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15840}
15841
15842
15843/**
15844 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15845 *
15846 * @returns Strict VBox status code.
15847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15848 * @thread EMT(pVCpu)
15849 */
15850VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15851{
15852 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15853 if (pVCpu->iem.s.cActiveMappings)
15854 iemMemRollback(pVCpu);
15855 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15856}
15857
15858
15859/**
15860 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15861 *
15862 * @returns Strict VBox status code.
15863 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15864 * @param uVector The external interrupt vector (pass 0 if the external
15865 * interrupt is still pending).
15866 * @param fIntPending Whether the external interrupt is pending or
15867 * acknowdledged in the interrupt controller.
15868 * @thread EMT(pVCpu)
15869 */
15870VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15871{
15872 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15873 if (pVCpu->iem.s.cActiveMappings)
15874 iemMemRollback(pVCpu);
15875 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15876}
15877
15878
15879/**
15880 * Interface for HM and EM to emulate VM-exit due to NMIs.
15881 *
15882 * @returns Strict VBox status code.
15883 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15884 * @thread EMT(pVCpu)
15885 */
15886VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmi(PVMCPU pVCpu)
15887{
15888 VBOXSTRICTRC rcStrict = iemVmxVmexitNmi(pVCpu);
15889 if (pVCpu->iem.s.cActiveMappings)
15890 iemMemRollback(pVCpu);
15891 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15892}
15893
15894
15895/**
15896 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15897 *
15898 * @returns Strict VBox status code.
15899 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15900 * @param uVector The SIPI vector.
15901 * @thread EMT(pVCpu)
15902 */
15903VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15904{
15905 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15906 if (pVCpu->iem.s.cActiveMappings)
15907 iemMemRollback(pVCpu);
15908 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15909}
15910
15911
15912/**
15913 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15914 *
15915 * @returns Strict VBox status code.
15916 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15917 * @thread EMT(pVCpu)
15918 */
15919VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15920{
15921 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15922 if (pVCpu->iem.s.cActiveMappings)
15923 iemMemRollback(pVCpu);
15924 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15925}
15926
15927
15928/**
15929 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15930 *
15931 * @returns Strict VBox status code.
15932 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15933 * @thread EMT(pVCpu)
15934 */
15935VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15936{
15937 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15938 if (pVCpu->iem.s.cActiveMappings)
15939 iemMemRollback(pVCpu);
15940 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15941}
15942
15943
15944/**
15945 * Interface for HM and EM to emulate VM-exits for NMI-windows.
15946 *
15947 * @returns Strict VBox status code.
15948 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15949 * @thread EMT(pVCpu)
15950 */
15951VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu)
15952{
15953 VBOXSTRICTRC rcStrict = iemVmxVmexitNmiWindow(pVCpu);
15954 if (pVCpu->iem.s.cActiveMappings)
15955 iemMemRollback(pVCpu);
15956 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15957}
15958
15959
15960/**
15961 * Interface for HM and EM to emulate VM-exits due to the Monitor-Trap Flag (MTF).
15962 *
15963 * @returns Strict VBox status code.
15964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15965 * @thread EMT(pVCpu)
15966 */
15967VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15968{
15969 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15970 if (pVCpu->iem.s.cActiveMappings)
15971 iemMemRollback(pVCpu);
15972 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15973}
15974
15975
15976/**
15977 * Interface for HM and EM to emulate the VMREAD instruction.
15978 *
15979 * @returns Strict VBox status code.
15980 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15981 * @param pExitInfo Pointer to the VM-exit information struct.
15982 * @thread EMT(pVCpu)
15983 */
15984VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15985{
15986 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15987 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15988 Assert(pExitInfo);
15989
15990 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15991
15992 VBOXSTRICTRC rcStrict;
15993 uint8_t const cbInstr = pExitInfo->cbInstr;
15994 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15995 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15996 {
15997 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15998 {
15999 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16000 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
16001 }
16002 else
16003 {
16004 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16005 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
16006 }
16007 }
16008 else
16009 {
16010 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
16011 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16012 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16013 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
16014 }
16015 Assert(!pVCpu->iem.s.cActiveMappings);
16016 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16017}
16018
16019
16020/**
16021 * Interface for HM and EM to emulate the VMWRITE instruction.
16022 *
16023 * @returns Strict VBox status code.
16024 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16025 * @param pExitInfo Pointer to the VM-exit information struct.
16026 * @thread EMT(pVCpu)
16027 */
16028VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16029{
16030 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16031 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16032 Assert(pExitInfo);
16033
16034 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16035
16036 uint64_t u64Val;
16037 uint8_t iEffSeg;
16038 IEMMODE enmEffAddrMode;
16039 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16040 {
16041 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16042 iEffSeg = UINT8_MAX;
16043 enmEffAddrMode = UINT8_MAX;
16044 }
16045 else
16046 {
16047 u64Val = pExitInfo->GCPtrEffAddr;
16048 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16049 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16050 }
16051 uint8_t const cbInstr = pExitInfo->cbInstr;
16052 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16053 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16054 Assert(!pVCpu->iem.s.cActiveMappings);
16055 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16056}
16057
16058
16059/**
16060 * Interface for HM and EM to emulate the VMPTRLD instruction.
16061 *
16062 * @returns Strict VBox status code.
16063 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16064 * @param pExitInfo Pointer to the VM-exit information struct.
16065 * @thread EMT(pVCpu)
16066 */
16067VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16068{
16069 Assert(pExitInfo);
16070 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16071 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16072
16073 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16074
16075 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16076 uint8_t const cbInstr = pExitInfo->cbInstr;
16077 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16078 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16079 Assert(!pVCpu->iem.s.cActiveMappings);
16080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16081}
16082
16083
16084/**
16085 * Interface for HM and EM to emulate the VMPTRST instruction.
16086 *
16087 * @returns Strict VBox status code.
16088 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16089 * @param pExitInfo Pointer to the VM-exit information struct.
16090 * @thread EMT(pVCpu)
16091 */
16092VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16093{
16094 Assert(pExitInfo);
16095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16096 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16097
16098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16099
16100 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16101 uint8_t const cbInstr = pExitInfo->cbInstr;
16102 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16103 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16104 Assert(!pVCpu->iem.s.cActiveMappings);
16105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16106}
16107
16108
16109/**
16110 * Interface for HM and EM to emulate the VMCLEAR instruction.
16111 *
16112 * @returns Strict VBox status code.
16113 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16114 * @param pExitInfo Pointer to the VM-exit information struct.
16115 * @thread EMT(pVCpu)
16116 */
16117VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16118{
16119 Assert(pExitInfo);
16120 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16121 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16122
16123 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16124
16125 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16126 uint8_t const cbInstr = pExitInfo->cbInstr;
16127 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16128 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16129 Assert(!pVCpu->iem.s.cActiveMappings);
16130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16131}
16132
16133
16134/**
16135 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16136 *
16137 * @returns Strict VBox status code.
16138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16139 * @param cbInstr The instruction length in bytes.
16140 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16141 * VMXINSTRID_VMRESUME).
16142 * @thread EMT(pVCpu)
16143 */
16144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16145{
16146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16147 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16148
16149 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16150 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16151 Assert(!pVCpu->iem.s.cActiveMappings);
16152 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16153}


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information struct.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access to this APIC-access page has been handled; the caller should not carry out the access itself. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
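
/*
 * Result sketch (illustrative): the two success statuses above mean different
 * things to PGM.  The call below is hypothetical (PGM invokes this handler
 * itself through the registered handler type) and is only meant to show how
 * the return values are interpreted.
 *
 *      VBOXSTRICTRC rcStrict = iemVmxApicAccessPageHandler(pVM, pVCpu, GCPhysFault, NULL, pvBuf, cbBuf,
 *                                                          enmAccessType, enmOrigin, NULL);
 *      if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
 *      {
 *          // Handler deregistered itself; perform the access on the page as ordinary RAM.
 *      }
 *      else if (rcStrict == VINF_SUCCESS)
 *      {
 *          // Access fully virtualized by IEM; the original access must not be carried out.
 *      }
 *      else
 *      {
 *          // Failure from iemVmxVirtApicAccessMem or the deregistration; propagate it.
 *      }
 */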

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}
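
/*
 * Behaviour sketch (illustrative): the slow path lets failures dominate,
 * passes identical statuses through, and flags any other combination as an
 * internal processing error.  The concrete status codes below are merely
 * examples picked for the sketch.
 *
 *      iemR3MergeStatusSlow(VERR_ACCESS_DENIED, VINF_EM_RESCHEDULE, 0, pVCpu)  -> VERR_ACCESS_DENIED
 *      iemR3MergeStatusSlow(VINF_EM_RESCHEDULE, VERR_ACCESS_DENIED, 0, pVCpu)  -> VERR_ACCESS_DENIED
 *      iemR3MergeStatusSlow(VINF_EM_HALT,       VINF_EM_HALT,       0, pVCpu)  -> VINF_EM_HALT
 *      anything else                                                           -> VERR_IOM_FF_STATUS_IPE (asserted)
 */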


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
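
/*
 * Merge rule sketch (illustrative): with the code above, an "uninteresting"
 * rcStrict simply adopts the commit status, a clean commit keeps rcStrict,
 * and two EM scheduling statuses merge to the numerically smaller one, which
 * EM treats as the more important request.
 *
 *      iemR3MergeStatus(VINF_SUCCESS,      rcCommit,     iMemMap, pVCpu)  -> rcCommit
 *      iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcCommit,     iMemMap, pVCpu)  -> rcCommit
 *      iemR3MergeStatus(rcOther,           VINF_SUCCESS, iMemMap, pVCpu)  -> rcOther
 *      two EM scheduling statuses                                         -> the smaller of the two
 *      everything else                                                    -> iemR3MergeStatusSlow()
 */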


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
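
/*
 * Usage sketch (illustrative): ring-3 force-flag processing is expected to
 * invoke this once it notices VMCPU_FF_IEM after returning from ring-0 or
 * raw-mode execution; exactly where this sits in EM's loop is an assumption
 * of this sketch.
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */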

#endif /* IN_RING3 */
