VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@79059

Last change on this file since 79059 was 79031, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 The Exit qualification is mandatory for all VM-exits (never undefined), so pass it explicitly to iemVmxVmexit along with the Exit reason. Adjusted all code to do this.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 651.9 KB
1/* $Id: IEMAll.cpp 79031 2019-06-07 05:58:55Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much a work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
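/*
 * Illustrative sketch (not part of the original sources): what a level-4
 * decode log statement as described above could look like.  The mnemonic
 * string is made up for the example; Log4 comes from VBox/log.h.
 *
 * @code
 * Log4(("decode - %04x:%08RX64 add Eb,Gb\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
 * @endcode
 */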
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
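/*
 * Usage sketch (illustrative only; the handler name iemOp_Example_Eb_Gb is
 * hypothetical): an opcode decoder function is declared with one of the
 * FNIEMOP*_DEF macros and invoked through the matching FNIEMOP_CALL* macro,
 * so the parameter list and calling convention can be tweaked in one place.
 *
 * @code
 * FNIEMOPRM_DEF(iemOp_Example_Eb_Gb)
 * {
 *     RT_NOREF(bRm);      // a real handler would decode the ModR/M byte here
 *     return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
 * }
 *
 * // In the caller, after fetching the ModR/M byte into bRm:
 * //     return FNIEMOP_CALL_1(iemOp_Example_Eb_Gb, bRm);
 * @endcode
 */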
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
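/*
 * Sketch of the two status propagation styles this switch selects between
 * (illustrative only; iemMemFetchDataU32Jmp is assumed to be the longjmp-based
 * counterpart of iemMemFetchDataU32 defined further down in this file):
 *
 * @code
 * // Without IEM_WITH_SETJMP: every memory helper returns a strict status.
 * uint32_t     u32Value;
 * VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 * if (rcStrict != VINF_SUCCESS)
 *     return rcStrict;
 *
 * // With IEM_WITH_SETJMP: the *Jmp variants longjmp out on failure, so the
 * // caller gets the value back directly and needs no status check.
 * uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 * @endcode
 */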
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
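/*
 * Minimal usage sketch for the two default-case helpers above (the switch is
 * hypothetical): handling every enum value explicitly keeps GCC quiet while
 * the macro turns an impossible default into an assertion + error return.
 *
 * @code
 * switch (enmXcptClass)
 * {
 *     case IEMXCPTCLASS_BENIGN:
 *     case IEMXCPTCLASS_CONTRIBUTORY:
 *     case IEMXCPTCLASS_PAGE_FAULT:
 *     case IEMXCPTCLASS_DOUBLE_FAULT:
 *         break;
 *     IEM_NOT_REACHED_DEFAULT_CASE_RET(); // default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
 * }
 * @endcode
 */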
247
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
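/*
 * Usage sketch (illustrative; the surrounding handler is hypothetical, while
 * VMX_EXIT_CPUID is assumed to be the exit reason constant from the VMX
 * headers): CPUID causes an unconditional VM-exit in VMX non-root operation,
 * so an instruction handler would simply do
 *
 * @code
 * if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
 *     IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
 * @endcode
 */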
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles the SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
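/*
 * Usage sketch (illustrative; SVM_CTRL_INTERCEPT_WBINVD and SVM_EXIT_WBINVD
 * are assumed to be the intercept bit and exit code from the SVM headers): an
 * instruction handler checks its control intercept, updates the NRIP and
 * triggers the \#VMEXIT in one go via the macro above.
 *
 * @code
 * IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
 * @endcode
 */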
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
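/*
 * Indexing sketch (illustrative; X86_MODRM_REG_SHIFT and X86_MODRM_REG_SMASK
 * are assumed to come from iprt/x86.h): the group 1 opcodes (0x80..0x83)
 * select their implementation table via the ModR/M reg field, so /0 is ADD,
 * /7 is CMP, and so on.
 *
 * @code
 * uint8_t const         iReg  = (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK;
 * PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[iReg];
 * @endcode
 */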
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
986IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
987IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
988#endif
989
990#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
991IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
992IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
993#endif
994
995
996/**
997 * Sets the pass up status.
998 *
999 * @returns VINF_SUCCESS.
1000 * @param pVCpu The cross context virtual CPU structure of the
1001 * calling thread.
1002 * @param rcPassUp The pass up status. Must be informational.
1003 * VINF_SUCCESS is not allowed.
1004 */
1005IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1006{
1007 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1008
1009 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1010 if (rcOldPassUp == VINF_SUCCESS)
1011 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1012 /* If both are EM scheduling codes, use EM priority rules. */
1013 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1014 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1015 {
1016 if (rcPassUp < rcOldPassUp)
1017 {
1018 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1019 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1020 }
1021 else
1022 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1023 }
1024 /* Override EM scheduling with specific status code. */
1025 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1026 {
1027 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1028 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1029 }
1030 /* Don't override specific status code, first come first served. */
1031 else
1032 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1033 return VINF_SUCCESS;
1034}
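/*
 * Typical call pattern (sketch, mirroring the opcode prefetch code further
 * down in this file): informational status codes from PGM reads are recorded
 * here so they can be folded into the final strict status once the
 * instruction has completed.
 *
 * @code
 * VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
 * if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 * @endcode
 */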
1035
1036
1037/**
1038 * Calculates the CPU mode.
1039 *
1040 * This is mainly for updating IEMCPU::enmCpuMode.
1041 *
1042 * @returns CPU mode.
1043 * @param pVCpu The cross context virtual CPU structure of the
1044 * calling thread.
1045 */
1046DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1047{
1048 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1049 return IEMMODE_64BIT;
1050 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1051 return IEMMODE_32BIT;
1052 return IEMMODE_16BIT;
1053}
1054
1055
1056/**
1057 * Initializes the execution state.
1058 *
1059 * @param pVCpu The cross context virtual CPU structure of the
1060 * calling thread.
1061 * @param fBypassHandlers Whether to bypass access handlers.
1062 *
1063 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1064 * side-effects in strict builds.
1065 */
1066DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1067{
1068 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1069 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1070
1071#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1080#endif
1081
1082#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1083 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1084#endif
1085 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1086 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1087#ifdef VBOX_STRICT
1088 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1089 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1090 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1091 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1092 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1093 pVCpu->iem.s.uRexReg = 127;
1094 pVCpu->iem.s.uRexB = 127;
1095 pVCpu->iem.s.offModRm = 127;
1096 pVCpu->iem.s.uRexIndex = 127;
1097 pVCpu->iem.s.iEffSeg = 127;
1098 pVCpu->iem.s.idxPrefix = 127;
1099 pVCpu->iem.s.uVex3rdReg = 127;
1100 pVCpu->iem.s.uVexLength = 127;
1101 pVCpu->iem.s.fEvexStuff = 127;
1102 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1103# ifdef IEM_WITH_CODE_TLB
1104 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1105 pVCpu->iem.s.pbInstrBuf = NULL;
1106 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1107 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1108 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1109 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1110# else
1111 pVCpu->iem.s.offOpcode = 127;
1112 pVCpu->iem.s.cbOpcode = 127;
1113# endif
1114#endif
1115
1116 pVCpu->iem.s.cActiveMappings = 0;
1117 pVCpu->iem.s.iNextMapping = 0;
1118 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1119 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1120#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1121 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1122 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1123 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1124 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1125 if (!pVCpu->iem.s.fInPatchCode)
1126 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1127#endif
1128}
1129
1130#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1131/**
1132 * Performs a minimal reinitialization of the execution state.
1133 *
1134 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1135 * 'world-switch' type operations on the CPU. Currently only nested
1136 * hardware-virtualization uses it.
1137 *
1138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1139 */
1140IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1141{
1142 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1143 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1144
1145 pVCpu->iem.s.uCpl = uCpl;
1146 pVCpu->iem.s.enmCpuMode = enmMode;
1147 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1148 pVCpu->iem.s.enmEffAddrMode = enmMode;
1149 if (enmMode != IEMMODE_64BIT)
1150 {
1151 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1152 pVCpu->iem.s.enmEffOpSize = enmMode;
1153 }
1154 else
1155 {
1156 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1157 pVCpu->iem.s.enmEffOpSize = enmMode;
1158 }
1159 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1160#ifndef IEM_WITH_CODE_TLB
1161 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1162 pVCpu->iem.s.offOpcode = 0;
1163 pVCpu->iem.s.cbOpcode = 0;
1164#endif
1165 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1166}
1167#endif
1168
1169/**
1170 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1171 *
1172 * @param pVCpu The cross context virtual CPU structure of the
1173 * calling thread.
1174 */
1175DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1176{
1177 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1178#ifdef VBOX_STRICT
1179# ifdef IEM_WITH_CODE_TLB
1180 NOREF(pVCpu);
1181# else
1182 pVCpu->iem.s.cbOpcode = 0;
1183# endif
1184#else
1185 NOREF(pVCpu);
1186#endif
1187}
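/*
 * Pairing sketch (illustrative): callers bracket one-shot interpretation with
 * iemInitExec()/iemUninitExec() so the strict-build poisoning done by the
 * former is undone again, per the remark on iemInitExec above.
 *
 * @code
 * iemInitExec(pVCpu, false /*fBypassHandlers*/);
 * VBOXSTRICTRC rcStrict = ...;   // decode and execute, e.g. via the one-byte opcode map
 * iemUninitExec(pVCpu);
 * @endcode
 */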
1188
1189
1190/**
1191 * Initializes the decoder state.
1192 *
1193 * iemReInitDecoder is mostly a copy of this function.
1194 *
1195 * @param pVCpu The cross context virtual CPU structure of the
1196 * calling thread.
1197 * @param fBypassHandlers Whether to bypass access handlers.
1198 */
1199DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1200{
1201 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1202 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1203
1204#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1213#endif
1214
1215#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1216 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1217#endif
1218 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1219 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1220 pVCpu->iem.s.enmCpuMode = enmMode;
1221 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1222 pVCpu->iem.s.enmEffAddrMode = enmMode;
1223 if (enmMode != IEMMODE_64BIT)
1224 {
1225 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1226 pVCpu->iem.s.enmEffOpSize = enmMode;
1227 }
1228 else
1229 {
1230 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1231 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1232 }
1233 pVCpu->iem.s.fPrefixes = 0;
1234 pVCpu->iem.s.uRexReg = 0;
1235 pVCpu->iem.s.uRexB = 0;
1236 pVCpu->iem.s.uRexIndex = 0;
1237 pVCpu->iem.s.idxPrefix = 0;
1238 pVCpu->iem.s.uVex3rdReg = 0;
1239 pVCpu->iem.s.uVexLength = 0;
1240 pVCpu->iem.s.fEvexStuff = 0;
1241 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1242#ifdef IEM_WITH_CODE_TLB
1243 pVCpu->iem.s.pbInstrBuf = NULL;
1244 pVCpu->iem.s.offInstrNextByte = 0;
1245 pVCpu->iem.s.offCurInstrStart = 0;
1246# ifdef VBOX_STRICT
1247 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1248 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1249 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1250# endif
1251#else
1252 pVCpu->iem.s.offOpcode = 0;
1253 pVCpu->iem.s.cbOpcode = 0;
1254#endif
1255 pVCpu->iem.s.offModRm = 0;
1256 pVCpu->iem.s.cActiveMappings = 0;
1257 pVCpu->iem.s.iNextMapping = 0;
1258 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1259 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1260#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1261 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1262 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1263 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1264 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1265 if (!pVCpu->iem.s.fInPatchCode)
1266 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1267#endif
1268
1269#ifdef DBGFTRACE_ENABLED
1270 switch (enmMode)
1271 {
1272 case IEMMODE_64BIT:
1273 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1274 break;
1275 case IEMMODE_32BIT:
1276 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1277 break;
1278 case IEMMODE_16BIT:
1279 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1280 break;
1281 }
1282#endif
1283}
1284
1285
1286/**
1287 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1288 *
1289 * This is mostly a copy of iemInitDecoder.
1290 *
1291 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1292 */
1293DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1294{
1295 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1296
1297#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1306#endif
1307
1308 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1309 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1310 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1311 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1312 pVCpu->iem.s.enmEffAddrMode = enmMode;
1313 if (enmMode != IEMMODE_64BIT)
1314 {
1315 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1316 pVCpu->iem.s.enmEffOpSize = enmMode;
1317 }
1318 else
1319 {
1320 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1321 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1322 }
1323 pVCpu->iem.s.fPrefixes = 0;
1324 pVCpu->iem.s.uRexReg = 0;
1325 pVCpu->iem.s.uRexB = 0;
1326 pVCpu->iem.s.uRexIndex = 0;
1327 pVCpu->iem.s.idxPrefix = 0;
1328 pVCpu->iem.s.uVex3rdReg = 0;
1329 pVCpu->iem.s.uVexLength = 0;
1330 pVCpu->iem.s.fEvexStuff = 0;
1331 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1332#ifdef IEM_WITH_CODE_TLB
1333 if (pVCpu->iem.s.pbInstrBuf)
1334 {
1335 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1336 - pVCpu->iem.s.uInstrBufPc;
1337 if (off < pVCpu->iem.s.cbInstrBufTotal)
1338 {
1339 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1340 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1341 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1342 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1343 else
1344 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1345 }
1346 else
1347 {
1348 pVCpu->iem.s.pbInstrBuf = NULL;
1349 pVCpu->iem.s.offInstrNextByte = 0;
1350 pVCpu->iem.s.offCurInstrStart = 0;
1351 pVCpu->iem.s.cbInstrBuf = 0;
1352 pVCpu->iem.s.cbInstrBufTotal = 0;
1353 }
1354 }
1355 else
1356 {
1357 pVCpu->iem.s.offInstrNextByte = 0;
1358 pVCpu->iem.s.offCurInstrStart = 0;
1359 pVCpu->iem.s.cbInstrBuf = 0;
1360 pVCpu->iem.s.cbInstrBufTotal = 0;
1361 }
1362#else
1363 pVCpu->iem.s.cbOpcode = 0;
1364 pVCpu->iem.s.offOpcode = 0;
1365#endif
1366 pVCpu->iem.s.offModRm = 0;
1367 Assert(pVCpu->iem.s.cActiveMappings == 0);
1368 pVCpu->iem.s.iNextMapping = 0;
1369 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1370 Assert(pVCpu->iem.s.fBypassHandlers == false);
1371#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1372 if (!pVCpu->iem.s.fInPatchCode)
1373 { /* likely */ }
1374 else
1375 {
1376 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1377 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1378 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1379 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1380 if (!pVCpu->iem.s.fInPatchCode)
1381 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1382 }
1383#endif
1384
1385#ifdef DBGFTRACE_ENABLED
1386 switch (enmMode)
1387 {
1388 case IEMMODE_64BIT:
1389 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1390 break;
1391 case IEMMODE_32BIT:
1392 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1393 break;
1394 case IEMMODE_16BIT:
1395 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1396 break;
1397 }
1398#endif
1399}
1400
1401
1402
1403/**
1404 * Prefetches the opcodes when starting execution for the first time.
1405 *
1406 * @returns Strict VBox status code.
1407 * @param pVCpu The cross context virtual CPU structure of the
1408 * calling thread.
1409 * @param fBypassHandlers Whether to bypass access handlers.
1410 */
1411IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1412{
1413 iemInitDecoder(pVCpu, fBypassHandlers);
1414
1415#ifdef IEM_WITH_CODE_TLB
1416 /** @todo Do ITLB lookup here. */
1417
1418#else /* !IEM_WITH_CODE_TLB */
1419
1420 /*
1421 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1422 *
1423 * First translate CS:rIP to a physical address.
1424 */
1425 uint32_t cbToTryRead;
1426 RTGCPTR GCPtrPC;
1427 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1428 {
1429 cbToTryRead = PAGE_SIZE;
1430 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1431 if (IEM_IS_CANONICAL(GCPtrPC))
1432 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1433 else
1434 return iemRaiseGeneralProtectionFault0(pVCpu);
1435 }
1436 else
1437 {
1438 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1439 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1440 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1441 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1442 else
1443 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1444 if (cbToTryRead) { /* likely */ }
1445 else /* overflowed */
1446 {
1447 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1448 cbToTryRead = UINT32_MAX;
1449 }
1450 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1451 Assert(GCPtrPC <= UINT32_MAX);
1452 }
1453
1454# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1455 /* Allow interpretation of patch manager code blocks since they can for
1456 instance throw #PFs for perfectly good reasons. */
1457 if (pVCpu->iem.s.fInPatchCode)
1458 {
1459 size_t cbRead = 0;
1460 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1461 AssertRCReturn(rc, rc);
1462 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1463 return VINF_SUCCESS;
1464 }
1465# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1466
1467 RTGCPHYS GCPhys;
1468 uint64_t fFlags;
1469 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1470 if (RT_SUCCESS(rc)) { /* probable */ }
1471 else
1472 {
1473 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1474 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1475 }
1476 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1477 else
1478 {
1479 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1480 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1481 }
1482 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1483 else
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1486 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1487 }
1488 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1489 /** @todo Check reserved bits and such stuff. PGM is better at doing
1490 * that, so do it when implementing the guest virtual address
1491 * TLB... */
1492
1493 /*
1494 * Read the bytes at this address.
1495 */
1496 PVM pVM = pVCpu->CTX_SUFF(pVM);
1497# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1498 size_t cbActual;
1499 if ( PATMIsEnabled(pVM)
1500 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1501 {
1502 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1503 Assert(cbActual > 0);
1504 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1505 }
1506 else
1507# endif
1508 {
1509 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1510 if (cbToTryRead > cbLeftOnPage)
1511 cbToTryRead = cbLeftOnPage;
1512 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1513 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1514
1515 if (!pVCpu->iem.s.fBypassHandlers)
1516 {
1517 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1518 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1519 { /* likely */ }
1520 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1521 {
1522 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1523                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1524 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1525 }
1526 else
1527 {
1528 Log((RT_SUCCESS(rcStrict)
1529 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1530 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1531                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1532 return rcStrict;
1533 }
1534 }
1535 else
1536 {
1537 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1538 if (RT_SUCCESS(rc))
1539 { /* likely */ }
1540 else
1541 {
1542 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1543                  GCPtrPC, GCPhys, cbToTryRead, rc));
1544 return rc;
1545 }
1546 }
1547 pVCpu->iem.s.cbOpcode = cbToTryRead;
1548 }
1549#endif /* !IEM_WITH_CODE_TLB */
1550 return VINF_SUCCESS;
1551}
1552
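/*
 * Illustrative sketch (not part of the build): the prefetch above effectively
 * clamps the number of bytes to read to the minimum of three limits - bytes
 * left before the CS limit (a page in 64-bit mode), bytes left on the current
 * guest page, and the size of the opcode buffer.  The helper name below is
 * made up purely to restate that clamping in one place.
 */
#if 0
DECLINLINE(uint32_t) iemSketchPrefetchSize(uint32_t cbLeftInLimit, RTGCPTR GCPtrPC, uint32_t cbOpcodeBuf)
{
    uint32_t       cb           = cbLeftInLimit;
    uint32_t const cbLeftOnPage = PAGE_SIZE - (uint32_t)(GCPtrPC & PAGE_OFFSET_MASK);
    if (cb > cbLeftOnPage)
        cb = cbLeftOnPage;
    if (cb > cbOpcodeBuf)
        cb = cbOpcodeBuf;
    return cb;
}
#endif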
1553
1554/**
1555 * Invalidates the IEM TLBs.
1556 *
1557 * This is called internally as well as by PGM when moving GC mappings.
1558 *
1560 * @param pVCpu The cross context virtual CPU structure of the calling
1561 * thread.
1562 * @param fVmm Set when PGM calls us with a remapping.
1563 */
1564VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1565{
1566#ifdef IEM_WITH_CODE_TLB
1567 pVCpu->iem.s.cbInstrBufTotal = 0;
1568 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1569 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1570 { /* very likely */ }
1571 else
1572 {
1573 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1574 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1575 while (i-- > 0)
1576 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1577 }
1578#endif
1579
1580#ifdef IEM_WITH_DATA_TLB
1581 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1582 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1583 { /* very likely */ }
1584 else
1585 {
1586 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1587 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1588 while (i-- > 0)
1589 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1590 }
1591#endif
1592 NOREF(pVCpu); NOREF(fVmm);
1593}
1594
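/*
 * Illustrative note (not part of the build): bumping uTlbRevision above acts
 * as a bulk invalidation because the revision is embedded in each entry's
 * uTag when the entry is created.  A lookup only hits when both the page
 * number and the revision match, so entries created under an older revision
 * simply stop matching; the array is only scrubbed on the rare wrap-around.
 */
#if 0
/* Hypothetical helper, named for illustration only; it mirrors the tag test
   used by the real lookup code further down: */
DECLINLINE(bool) iemSketchDataTlbHit(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    uint64_t const uTag = (GCPtrPage >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
    return pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif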
1595
1596/**
1597 * Invalidates a page in the TLBs.
1598 *
1599 * @param pVCpu The cross context virtual CPU structure of the calling
1600 * thread.
1601 * @param   GCPtr       The address of the page to invalidate.
1602 */
1603VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1604{
1605#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1606 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1607 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1608 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1609 uintptr_t idx = (uint8_t)GCPtr;
1610
1611# ifdef IEM_WITH_CODE_TLB
1612 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1613 {
1614 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1615 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1616 pVCpu->iem.s.cbInstrBufTotal = 0;
1617 }
1618# endif
1619
1620# ifdef IEM_WITH_DATA_TLB
1621 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1622 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1623# endif
1624#else
1625 NOREF(pVCpu); NOREF(GCPtr);
1626#endif
1627}
1628
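/*
 * Illustrative note (not part of the build): cbInstrBufTotal is only zeroed
 * above when the invalidated page is the one currently backing pbInstrBuf
 * (identified via uInstrBufPc); that is what keeps the decoder from reading
 * stale bytes, since the mapping itself is refreshed lazily on the next
 * opcode fetch.  A hypothetical caller flushing the page holding the current
 * RIP would look like this:
 */
#if 0
DECLINLINE(void) iemSketchFlushCurrentCodePage(PVMCPU pVCpu)
{
    IEMTlbInvalidatePage(pVCpu, pVCpu->cpum.GstCtx.rip);
}
#endif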
1629
1630/**
1631 * Invalidates the host physical aspects of the IEM TLBs.
1632 *
1633 * This is called internally as well as by PGM when moving GC mappings.
1634 *
1635 * @param pVCpu The cross context virtual CPU structure of the calling
1636 * thread.
1637 */
1638VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1639{
1640#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1641    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1642
1643# ifdef IEM_WITH_CODE_TLB
1644 pVCpu->iem.s.cbInstrBufTotal = 0;
1645# endif
1646 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1647 if (uTlbPhysRev != 0)
1648 {
1649 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1650 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1651 }
1652 else
1653 {
1654 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1655 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1656
1657 unsigned i;
1658# ifdef IEM_WITH_CODE_TLB
1659 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1660 while (i-- > 0)
1661 {
1662 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1663 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1664 }
1665# endif
1666# ifdef IEM_WITH_DATA_TLB
1667 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1668 while (i-- > 0)
1669 {
1670 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1671 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1672 }
1673# endif
1674 }
1675#else
1676 NOREF(pVCpu);
1677#endif
1678}
1679
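/*
 * Illustrative note (not part of the build): uTlbPhysRev plays the same role
 * for the host-physical half of an entry as uTlbRevision does for the guest
 * tag.  After the bump above, the physical-revision test in the opcode fetch
 * path fails for every existing entry, forcing PGMPhysIemGCPhys2PtrNoLock to
 * be consulted again before any cached pbMappingR3 pointer is reused.  The
 * helper name below is made up; the comparison is the one used further down.
 */
#if 0
DECLINLINE(bool) iemSketchPhysRevValid(PVMCPU pVCpu, PIEMTLBENTRY pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
}
#endif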
1680
1681/**
1682 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1683 *
1684 * This is called internally as well as by PGM when moving GC mappings.
1685 *
1686 * @param pVM The cross context VM structure.
1687 *
1688 * @remarks Caller holds the PGM lock.
1689 */
1690VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1691{
1692 RT_NOREF_PV(pVM);
1693}
1694
1695#ifdef IEM_WITH_CODE_TLB
1696
1697/**
1698 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1699 * failure and longjmps.
1700 *
1701 * We end up here for a number of reasons:
1702 * - pbInstrBuf isn't yet initialized.
1703 *      - Advancing beyond the buffer boundary (e.g. cross page).
1704 * - Advancing beyond the CS segment limit.
1705 * - Fetching from non-mappable page (e.g. MMIO).
1706 *
1707 * @param pVCpu The cross context virtual CPU structure of the
1708 * calling thread.
1709 * @param pvDst Where to return the bytes.
1710 * @param cbDst Number of bytes to read.
1711 *
1712 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1713 */
1714IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1715{
1716#ifdef IN_RING3
1717 for (;;)
1718 {
1719 Assert(cbDst <= 8);
1720 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1721
1722 /*
1723 * We might have a partial buffer match, deal with that first to make the
1724 * rest simpler. This is the first part of the cross page/buffer case.
1725 */
1726 if (pVCpu->iem.s.pbInstrBuf != NULL)
1727 {
1728 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1729 {
1730 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1731 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1732 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1733
1734 cbDst -= cbCopy;
1735 pvDst = (uint8_t *)pvDst + cbCopy;
1736 offBuf += cbCopy;
1737                pVCpu->iem.s.offInstrNextByte += cbCopy;
1738 }
1739 }
1740
1741 /*
1742 * Check segment limit, figuring how much we're allowed to access at this point.
1743 *
1744 * We will fault immediately if RIP is past the segment limit / in non-canonical
1745 * territory. If we do continue, there are one or more bytes to read before we
1746 * end up in trouble and we need to do that first before faulting.
1747 */
1748 RTGCPTR GCPtrFirst;
1749 uint32_t cbMaxRead;
1750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1751 {
1752 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1753 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1754 { /* likely */ }
1755 else
1756 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1757 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1758 }
1759 else
1760 {
1761 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1762 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1763 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1764 { /* likely */ }
1765 else
1766 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1767 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1768 if (cbMaxRead != 0)
1769 { /* likely */ }
1770 else
1771 {
1772 /* Overflowed because address is 0 and limit is max. */
1773 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1774 cbMaxRead = X86_PAGE_SIZE;
1775 }
1776 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1777 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1778 if (cbMaxRead2 < cbMaxRead)
1779 cbMaxRead = cbMaxRead2;
1780 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1781 }
1782
1783 /*
1784 * Get the TLB entry for this piece of code.
1785 */
1786 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1787 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1788 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1789 if (pTlbe->uTag == uTag)
1790 {
1791 /* likely when executing lots of code, otherwise unlikely */
1792# ifdef VBOX_WITH_STATISTICS
1793 pVCpu->iem.s.CodeTlb.cTlbHits++;
1794# endif
1795 }
1796 else
1797 {
1798 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1799# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1800 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1801 {
1802 pTlbe->uTag = uTag;
1803 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1804                                     | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1805 pTlbe->GCPhys = NIL_RTGCPHYS;
1806 pTlbe->pbMappingR3 = NULL;
1807 }
1808 else
1809# endif
1810 {
1811 RTGCPHYS GCPhys;
1812 uint64_t fFlags;
1813 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1814 if (RT_FAILURE(rc))
1815 {
1816 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1817 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1818 }
1819
1820 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1821 pTlbe->uTag = uTag;
1822 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1823 pTlbe->GCPhys = GCPhys;
1824 pTlbe->pbMappingR3 = NULL;
1825 }
1826 }
1827
1828 /*
1829 * Check TLB page table level access flags.
1830 */
1831 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1832 {
1833 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1834 {
1835 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1836 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1837 }
1838 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1839 {
1840 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1841 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1842 }
1843 }
1844
1845# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1846 /*
1847 * Allow interpretation of patch manager code blocks since they can for
1848 * instance throw #PFs for perfectly good reasons.
1849 */
1850 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1851        { /* not unlikely */ }
1852 else
1853 {
1854            /** @todo Could optimize this a little in ring-3 if we liked. */
1855 size_t cbRead = 0;
1856 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1857 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1858 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1859 return;
1860 }
1861# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1862
1863 /*
1864 * Look up the physical page info if necessary.
1865 */
1866 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1867 { /* not necessary */ }
1868 else
1869 {
1870 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1871 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1872 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1873 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1874 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1875 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1876 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1877 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1878 }
1879
1880# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1881 /*
1882 * Try do a direct read using the pbMappingR3 pointer.
1883 */
1884 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1885 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1886 {
1887 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1888 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1889 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1890 {
1891 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1892 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1893 }
1894 else
1895 {
1896 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1897 Assert(cbInstr < cbMaxRead);
1898 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1899 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1900 }
1901 if (cbDst <= cbMaxRead)
1902 {
1903 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1904 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1905 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1906 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1907 return;
1908 }
1909 pVCpu->iem.s.pbInstrBuf = NULL;
1910
1911 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1912 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1913 }
1914 else
1915# endif
1916#if 0
1917 /*
1918         * If there is no special read handling, we can read a bit more and
1919 * put it in the prefetch buffer.
1920 */
1921 if ( cbDst < cbMaxRead
1922 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1923 {
1924 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1925 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1926 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1927 { /* likely */ }
1928 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1929 {
1930 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1931                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1932 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1933                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1934 }
1935 else
1936 {
1937 Log((RT_SUCCESS(rcStrict)
1938 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1939 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1940                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1941 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1942 }
1943 }
1944 /*
1945 * Special read handling, so only read exactly what's needed.
1946 * This is a highly unlikely scenario.
1947 */
1948 else
1949#endif
1950 {
1951 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1952 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1953 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1954 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1955 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1956 { /* likely */ }
1957 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1958 {
1959 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1960                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1961 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1962 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1963 }
1964 else
1965 {
1966 Log((RT_SUCCESS(rcStrict)
1967 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1968 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1969                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1970 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1971 }
1972 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1973 if (cbToRead == cbDst)
1974 return;
1975 }
1976
1977 /*
1978 * More to read, loop.
1979 */
1980 cbDst -= cbMaxRead;
1981 pvDst = (uint8_t *)pvDst + cbMaxRead;
1982 }
1983#else
1984 RT_NOREF(pvDst, cbDst);
1985 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1986#endif
1987}
1988
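/*
 * Illustrative example (not part of the build, numbers made up): with a 4 KiB
 * code page mapped at pbInstrBuf and an instruction starting 5 bytes into it,
 * the bookkeeping maintained by the fetcher above would look roughly like:
 *      offCurInstrStart = 5        (start of the current instruction)
 *      offInstrNextByte = 7        (two opcode bytes consumed so far)
 *      cbInstrBuf       = 20       (offCurInstrStart + 15, clamped to the data available)
 *      cbInstrBufTotal  = 4096     (the whole page is readable)
 * The fast paths in iemOpcodeGetNextU8Jmp and friends then only need pbInstrBuf
 * to be non-NULL and offInstrNextByte to be below cbInstrBuf; everything else
 * (page crossing, limits, MMIO) is handled here.
 */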
1989#else
1990
1991/**
1992 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1993 * exception if it fails.
1994 *
1995 * @returns Strict VBox status code.
1996 * @param pVCpu The cross context virtual CPU structure of the
1997 * calling thread.
1998 * @param   cbMin               The minimum number of bytes relative to offOpcode
1999 * that must be read.
2000 */
2001IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2002{
2003 /*
2004 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2005 *
2006 * First translate CS:rIP to a physical address.
2007 */
2008 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2009 uint32_t cbToTryRead;
2010 RTGCPTR GCPtrNext;
2011 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2012 {
2013 cbToTryRead = PAGE_SIZE;
2014 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2015 if (!IEM_IS_CANONICAL(GCPtrNext))
2016 return iemRaiseGeneralProtectionFault0(pVCpu);
2017 }
2018 else
2019 {
2020 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2021 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2022 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2023 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2024 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2025 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2026 if (!cbToTryRead) /* overflowed */
2027 {
2028 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2029 cbToTryRead = UINT32_MAX;
2030 /** @todo check out wrapping around the code segment. */
2031 }
2032 if (cbToTryRead < cbMin - cbLeft)
2033 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2034 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2035 }
2036
2037 /* Only read up to the end of the page, and make sure we don't read more
2038 than the opcode buffer can hold. */
2039 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2040 if (cbToTryRead > cbLeftOnPage)
2041 cbToTryRead = cbLeftOnPage;
2042 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2043 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2044/** @todo r=bird: Convert assertion into undefined opcode exception? */
2045 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2046
2047# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2048 /* Allow interpretation of patch manager code blocks since they can for
2049 instance throw #PFs for perfectly good reasons. */
2050 if (pVCpu->iem.s.fInPatchCode)
2051 {
2052 size_t cbRead = 0;
2053 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2054 AssertRCReturn(rc, rc);
2055 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2056 return VINF_SUCCESS;
2057 }
2058# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2059
2060 RTGCPHYS GCPhys;
2061 uint64_t fFlags;
2062 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2063 if (RT_FAILURE(rc))
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2066 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2067 }
2068 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2069 {
2070 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2071 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2072 }
2073 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2074 {
2075 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2076 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2077 }
2078 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2079 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2080 /** @todo Check reserved bits and such stuff. PGM is better at doing
2081 * that, so do it when implementing the guest virtual address
2082 * TLB... */
2083
2084 /*
2085 * Read the bytes at this address.
2086 *
2087 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2088 * and since PATM should only patch the start of an instruction there
2089 * should be no need to check again here.
2090 */
2091 if (!pVCpu->iem.s.fBypassHandlers)
2092 {
2093 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2094 cbToTryRead, PGMACCESSORIGIN_IEM);
2095 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2096 { /* likely */ }
2097 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2098 {
2099 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2100                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2101 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2102 }
2103 else
2104 {
2105 Log((RT_SUCCESS(rcStrict)
2106 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2107 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2108                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2109 return rcStrict;
2110 }
2111 }
2112 else
2113 {
2114 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2115 if (RT_SUCCESS(rc))
2116 { /* likely */ }
2117 else
2118 {
2119 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2120 return rc;
2121 }
2122 }
2123 pVCpu->iem.s.cbOpcode += cbToTryRead;
2124 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2125
2126 return VINF_SUCCESS;
2127}
2128
2129#endif /* !IEM_WITH_CODE_TLB */
2130#ifndef IEM_WITH_SETJMP
2131
2132/**
2133 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the
2137 * calling thread.
2138 * @param pb Where to return the opcode byte.
2139 */
2140DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2141{
2142 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2143 if (rcStrict == VINF_SUCCESS)
2144 {
2145 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2146 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2147 pVCpu->iem.s.offOpcode = offOpcode + 1;
2148 }
2149 else
2150 *pb = 0;
2151 return rcStrict;
2152}
2153
2154
2155/**
2156 * Fetches the next opcode byte.
2157 *
2158 * @returns Strict VBox status code.
2159 * @param pVCpu The cross context virtual CPU structure of the
2160 * calling thread.
2161 * @param pu8 Where to return the opcode byte.
2162 */
2163DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2164{
2165 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2166 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2167 {
2168 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2169 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2170 return VINF_SUCCESS;
2171 }
2172 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2173}
2174
2175#else /* IEM_WITH_SETJMP */
2176
2177/**
2178 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2179 *
2180 * @returns The opcode byte.
2181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2182 */
2183DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2184{
2185# ifdef IEM_WITH_CODE_TLB
2186 uint8_t u8;
2187 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2188 return u8;
2189# else
2190 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2191 if (rcStrict == VINF_SUCCESS)
2192 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2193 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2194# endif
2195}
2196
2197
2198/**
2199 * Fetches the next opcode byte, longjmp on error.
2200 *
2201 * @returns The opcode byte.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 */
2204DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2205{
2206# ifdef IEM_WITH_CODE_TLB
2207 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2208 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2209 if (RT_LIKELY( pbBuf != NULL
2210 && offBuf < pVCpu->iem.s.cbInstrBuf))
2211 {
2212 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2213 return pbBuf[offBuf];
2214 }
2215# else
2216 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2217 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2218 {
2219 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2220 return pVCpu->iem.s.abOpcode[offOpcode];
2221 }
2222# endif
2223 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2224}
2225
2226#endif /* IEM_WITH_SETJMP */
2227
2228/**
2229 * Fetches the next opcode byte, returns automatically on failure.
2230 *
2231 * @param a_pu8 Where to return the opcode byte.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2239 if (rcStrict2 == VINF_SUCCESS) \
2240 { /* likely */ } \
2241 else \
2242 return rcStrict2; \
2243 } while (0)
2244#else
2245# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2246#endif /* IEM_WITH_SETJMP */
2247
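/*
 * Illustrative usage sketch (not part of the build): decoder code uses the
 * IEM_OPCODE_GET_NEXT_* macros so the same source works for both build
 * flavours - with IEM_WITH_SETJMP a failure longjmps out, without it the
 * macro returns the strict status code from the enclosing function.  The
 * function below is made up purely to show the calling pattern.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemSketchDecodeTwoBytes(PVMCPU pVCpu)
{
    uint8_t bOpcode;
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bOpcode);   /* returns or longjmps on failure */
    IEM_OPCODE_GET_NEXT_U8(&bImm);
    RT_NOREF(bOpcode, bImm);
    return VINF_SUCCESS;
}
#endif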
2248
2249#ifndef IEM_WITH_SETJMP
2250/**
2251 * Fetches the next signed byte from the opcode stream.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param pi8 Where to return the signed byte.
2256 */
2257DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2258{
2259 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2260}
2261#endif /* !IEM_WITH_SETJMP */
2262
2263
2264/**
2265 * Fetches the next signed byte from the opcode stream, returning automatically
2266 * on failure.
2267 *
2268 * @param a_pi8 Where to return the signed byte.
2269 * @remark Implicitly references pVCpu.
2270 */
2271#ifndef IEM_WITH_SETJMP
2272# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2273 do \
2274 { \
2275 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2276 if (rcStrict2 != VINF_SUCCESS) \
2277 return rcStrict2; \
2278 } while (0)
2279#else /* IEM_WITH_SETJMP */
2280# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2281
2282#endif /* IEM_WITH_SETJMP */
2283
2284#ifndef IEM_WITH_SETJMP
2285
2286/**
2287 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2288 *
2289 * @returns Strict VBox status code.
2290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2291 * @param   pu16                Where to return the opcode word.
2292 */
2293DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2294{
2295 uint8_t u8;
2296 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2297 if (rcStrict == VINF_SUCCESS)
2298 *pu16 = (int8_t)u8;
2299 return rcStrict;
2300}
2301
2302
2303/**
2304 * Fetches the next signed byte from the opcode stream, extending it to
2305 * unsigned 16-bit.
2306 *
2307 * @returns Strict VBox status code.
2308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2309 * @param pu16 Where to return the unsigned word.
2310 */
2311DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2312{
2313 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2314 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2315 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2316
2317 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2318 pVCpu->iem.s.offOpcode = offOpcode + 1;
2319 return VINF_SUCCESS;
2320}
2321
2322#endif /* !IEM_WITH_SETJMP */
2323
2324/**
2325 * Fetches the next signed byte from the opcode stream and sign-extends it to
2326 * a word, returning automatically on failure.
2327 *
2328 * @param a_pu16 Where to return the word.
2329 * @remark Implicitly references pVCpu.
2330 */
2331#ifndef IEM_WITH_SETJMP
2332# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2333 do \
2334 { \
2335 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2336 if (rcStrict2 != VINF_SUCCESS) \
2337 return rcStrict2; \
2338 } while (0)
2339#else
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2341#endif
2342
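/*
 * Illustrative example (not part of the build): the S8_SX_* fetchers read one
 * opcode byte and sign extend it, so a displacement byte of 0x80 (-128) comes
 * back as 0xFF80 from IEM_OPCODE_GET_NEXT_S8_SX_U16, as 0xFFFFFF80 from the
 * U32 variant and as 0xFFFFFFFFFFFFFF80 from the U64 variant, while 0x7F stays
 * 0x007F in all of them.  That is exactly the (int8_t) cast visible in the
 * inline implementations above and below.
 */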
2343#ifndef IEM_WITH_SETJMP
2344
2345/**
2346 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2347 *
2348 * @returns Strict VBox status code.
2349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2350 * @param pu32 Where to return the opcode dword.
2351 */
2352DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2353{
2354 uint8_t u8;
2355 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2356 if (rcStrict == VINF_SUCCESS)
2357 *pu32 = (int8_t)u8;
2358 return rcStrict;
2359}
2360
2361
2362/**
2363 * Fetches the next signed byte from the opcode stream, extending it to
2364 * unsigned 32-bit.
2365 *
2366 * @returns Strict VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2368 * @param pu32 Where to return the unsigned dword.
2369 */
2370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2371{
2372 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2373 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2374 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2375
2376 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2377 pVCpu->iem.s.offOpcode = offOpcode + 1;
2378 return VINF_SUCCESS;
2379}
2380
2381#endif /* !IEM_WITH_SETJMP */
2382
2383/**
2384 * Fetches the next signed byte from the opcode stream and sign-extends it to
2385 * a double word, returning automatically on failure.
2386 *
2387 * @param   a_pu32              Where to return the double word.
2388 * @remark Implicitly references pVCpu.
2389 */
2390#ifndef IEM_WITH_SETJMP
2391#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2392 do \
2393 { \
2394 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2395 if (rcStrict2 != VINF_SUCCESS) \
2396 return rcStrict2; \
2397 } while (0)
2398#else
2399# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2400#endif
2401
2402#ifndef IEM_WITH_SETJMP
2403
2404/**
2405 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2406 *
2407 * @returns Strict VBox status code.
2408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2409 * @param pu64 Where to return the opcode qword.
2410 */
2411DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2412{
2413 uint8_t u8;
2414 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2415 if (rcStrict == VINF_SUCCESS)
2416 *pu64 = (int8_t)u8;
2417 return rcStrict;
2418}
2419
2420
2421/**
2422 * Fetches the next signed byte from the opcode stream, extending it to
2423 * unsigned 64-bit.
2424 *
2425 * @returns Strict VBox status code.
2426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2427 * @param pu64 Where to return the unsigned qword.
2428 */
2429DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2430{
2431 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2432 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2433 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2434
2435 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2436 pVCpu->iem.s.offOpcode = offOpcode + 1;
2437 return VINF_SUCCESS;
2438}
2439
2440#endif /* !IEM_WITH_SETJMP */
2441
2442
2443/**
2444 * Fetches the next signed byte from the opcode stream and sign-extends it to
2445 * a quad word, returning automatically on failure.
2446 *
2447 * @param   a_pu64              Where to return the quad word.
2448 * @remark Implicitly references pVCpu.
2449 */
2450#ifndef IEM_WITH_SETJMP
2451# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2452 do \
2453 { \
2454 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2455 if (rcStrict2 != VINF_SUCCESS) \
2456 return rcStrict2; \
2457 } while (0)
2458#else
2459# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2460#endif
2461
2462
2463#ifndef IEM_WITH_SETJMP
2464/**
2465 * Fetches the next opcode byte, noting down its position as the ModR/M byte offset.
2466 *
2467 * @returns Strict VBox status code.
2468 * @param pVCpu The cross context virtual CPU structure of the
2469 * calling thread.
2470 * @param pu8 Where to return the opcode byte.
2471 */
2472DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2473{
2474 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2475 pVCpu->iem.s.offModRm = offOpcode;
2476 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2477 {
2478 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2479 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2480 return VINF_SUCCESS;
2481 }
2482 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2483}
2484#else /* IEM_WITH_SETJMP */
2485/**
2486 * Fetches the next opcode byte, noting down its position as the ModR/M byte
2486 * offset; longjmp on error.
2487 *
2488 * @returns The opcode byte.
2489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2490 */
2491DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2492{
2493# ifdef IEM_WITH_CODE_TLB
2494 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2495 pVCpu->iem.s.offModRm = offBuf;
2496 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2497 if (RT_LIKELY( pbBuf != NULL
2498 && offBuf < pVCpu->iem.s.cbInstrBuf))
2499 {
2500 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2501 return pbBuf[offBuf];
2502 }
2503# else
2504 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2505 pVCpu->iem.s.offModRm = offOpcode;
2506 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2507 {
2508 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2509 return pVCpu->iem.s.abOpcode[offOpcode];
2510 }
2511# endif
2512 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2513}
2514#endif /* IEM_WITH_SETJMP */
2515
2516/**
2517 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2518 * on failure.
2519 *
2520 * Will note down the position of the ModR/M byte for VT-x exits.
2521 *
2522 * @param a_pbRm Where to return the RM opcode byte.
2523 * @remark Implicitly references pVCpu.
2524 */
2525#ifndef IEM_WITH_SETJMP
2526# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2527 do \
2528 { \
2529 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2530 if (rcStrict2 == VINF_SUCCESS) \
2531 { /* likely */ } \
2532 else \
2533 return rcStrict2; \
2534 } while (0)
2535#else
2536# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2537#endif /* IEM_WITH_SETJMP */
2538
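/*
 * Illustrative usage sketch (not part of the build): decoders fetch the ModR/M
 * byte through IEM_OPCODE_GET_NEXT_RM rather than the plain U8 variant so that
 * offModRm is recorded for the VT-x exit handling mentioned above.  The
 * fragment below is made up purely to show the intended call site.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemSketchDecodeWithModRm(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_RM(&bRm);       /* also records pVCpu->iem.s.offModRm */
    if ((bRm & 0xc0) == 0xc0)
    {
        /* mod == 3: register operand. */
    }
    return VINF_SUCCESS;
}
#endif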
2539
2540#ifndef IEM_WITH_SETJMP
2541
2542/**
2543 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2544 *
2545 * @returns Strict VBox status code.
2546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2547 * @param pu16 Where to return the opcode word.
2548 */
2549DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2550{
2551 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2552 if (rcStrict == VINF_SUCCESS)
2553 {
2554 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2555# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2556 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2557# else
2558 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2559# endif
2560 pVCpu->iem.s.offOpcode = offOpcode + 2;
2561 }
2562 else
2563 *pu16 = 0;
2564 return rcStrict;
2565}
2566
2567
2568/**
2569 * Fetches the next opcode word.
2570 *
2571 * @returns Strict VBox status code.
2572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2573 * @param pu16 Where to return the opcode word.
2574 */
2575DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2576{
2577 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2578 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2579 {
2580 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2581# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2582 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2583# else
2584 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2585# endif
2586 return VINF_SUCCESS;
2587 }
2588 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2589}
2590
2591#else /* IEM_WITH_SETJMP */
2592
2593/**
2594 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2595 *
2596 * @returns The opcode word.
2597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2598 */
2599DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2600{
2601# ifdef IEM_WITH_CODE_TLB
2602 uint16_t u16;
2603 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2604 return u16;
2605# else
2606 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2607 if (rcStrict == VINF_SUCCESS)
2608 {
2609 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2610 pVCpu->iem.s.offOpcode += 2;
2611# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2612 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2613# else
2614 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2615# endif
2616 }
2617 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2618# endif
2619}
2620
2621
2622/**
2623 * Fetches the next opcode word, longjmp on error.
2624 *
2625 * @returns The opcode word.
2626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2627 */
2628DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2629{
2630# ifdef IEM_WITH_CODE_TLB
2631 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2632 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2633 if (RT_LIKELY( pbBuf != NULL
2634 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2635 {
2636 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2637# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2638 return *(uint16_t const *)&pbBuf[offBuf];
2639# else
2640 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2641# endif
2642 }
2643# else
2644 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2645 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2646 {
2647 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2648# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2649 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2650# else
2651 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2652# endif
2653 }
2654# endif
2655 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2656}
2657
2658#endif /* IEM_WITH_SETJMP */
2659
2660
2661/**
2662 * Fetches the next opcode word, returns automatically on failure.
2663 *
2664 * @param a_pu16 Where to return the opcode word.
2665 * @remark Implicitly references pVCpu.
2666 */
2667#ifndef IEM_WITH_SETJMP
2668# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2669 do \
2670 { \
2671 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2672 if (rcStrict2 != VINF_SUCCESS) \
2673 return rcStrict2; \
2674 } while (0)
2675#else
2676# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2677#endif
2678
2679#ifndef IEM_WITH_SETJMP
2680
2681/**
2682 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2683 *
2684 * @returns Strict VBox status code.
2685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2686 * @param pu32 Where to return the opcode double word.
2687 */
2688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2689{
2690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2691 if (rcStrict == VINF_SUCCESS)
2692 {
2693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2694 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2695 pVCpu->iem.s.offOpcode = offOpcode + 2;
2696 }
2697 else
2698 *pu32 = 0;
2699 return rcStrict;
2700}
2701
2702
2703/**
2704 * Fetches the next opcode word, zero extending it to a double word.
2705 *
2706 * @returns Strict VBox status code.
2707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2708 * @param pu32 Where to return the opcode double word.
2709 */
2710DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2711{
2712 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2713 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2714 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2715
2716 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2717 pVCpu->iem.s.offOpcode = offOpcode + 2;
2718 return VINF_SUCCESS;
2719}
2720
2721#endif /* !IEM_WITH_SETJMP */
2722
2723
2724/**
2725 * Fetches the next opcode word and zero extends it to a double word, returns
2726 * automatically on failure.
2727 *
2728 * @param a_pu32 Where to return the opcode double word.
2729 * @remark Implicitly references pVCpu.
2730 */
2731#ifndef IEM_WITH_SETJMP
2732# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2733 do \
2734 { \
2735 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2736 if (rcStrict2 != VINF_SUCCESS) \
2737 return rcStrict2; \
2738 } while (0)
2739#else
2740# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2741#endif
2742
2743#ifndef IEM_WITH_SETJMP
2744
2745/**
2746 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2750 * @param pu64 Where to return the opcode quad word.
2751 */
2752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2753{
2754 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2755 if (rcStrict == VINF_SUCCESS)
2756 {
2757 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2758 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2759 pVCpu->iem.s.offOpcode = offOpcode + 2;
2760 }
2761 else
2762 *pu64 = 0;
2763 return rcStrict;
2764}
2765
2766
2767/**
2768 * Fetches the next opcode word, zero extending it to a quad word.
2769 *
2770 * @returns Strict VBox status code.
2771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2772 * @param pu64 Where to return the opcode quad word.
2773 */
2774DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2775{
2776 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2777 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2778 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2779
2780 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2781 pVCpu->iem.s.offOpcode = offOpcode + 2;
2782 return VINF_SUCCESS;
2783}
2784
2785#endif /* !IEM_WITH_SETJMP */
2786
2787/**
2788 * Fetches the next opcode word and zero extends it to a quad word, returns
2789 * automatically on failure.
2790 *
2791 * @param a_pu64 Where to return the opcode quad word.
2792 * @remark Implicitly references pVCpu.
2793 */
2794#ifndef IEM_WITH_SETJMP
2795# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2796 do \
2797 { \
2798 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2799 if (rcStrict2 != VINF_SUCCESS) \
2800 return rcStrict2; \
2801 } while (0)
2802#else
2803# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2804#endif
2805
2806
2807#ifndef IEM_WITH_SETJMP
2808/**
2809 * Fetches the next signed word from the opcode stream.
2810 *
2811 * @returns Strict VBox status code.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 * @param pi16 Where to return the signed word.
2814 */
2815DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2816{
2817 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2818}
2819#endif /* !IEM_WITH_SETJMP */
2820
2821
2822/**
2823 * Fetches the next signed word from the opcode stream, returning automatically
2824 * on failure.
2825 *
2826 * @param a_pi16 Where to return the signed word.
2827 * @remark Implicitly references pVCpu.
2828 */
2829#ifndef IEM_WITH_SETJMP
2830# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2831 do \
2832 { \
2833 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2834 if (rcStrict2 != VINF_SUCCESS) \
2835 return rcStrict2; \
2836 } while (0)
2837#else
2838# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2839#endif
2840
2841#ifndef IEM_WITH_SETJMP
2842
2843/**
2844 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2845 *
2846 * @returns Strict VBox status code.
2847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2848 * @param pu32 Where to return the opcode dword.
2849 */
2850DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2851{
2852 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2853 if (rcStrict == VINF_SUCCESS)
2854 {
2855 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2856# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2857 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2858# else
2859 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2860 pVCpu->iem.s.abOpcode[offOpcode + 1],
2861 pVCpu->iem.s.abOpcode[offOpcode + 2],
2862 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2863# endif
2864 pVCpu->iem.s.offOpcode = offOpcode + 4;
2865 }
2866 else
2867 *pu32 = 0;
2868 return rcStrict;
2869}
2870
2871
2872/**
2873 * Fetches the next opcode dword.
2874 *
2875 * @returns Strict VBox status code.
2876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2877 * @param pu32 Where to return the opcode double word.
2878 */
2879DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2880{
2881 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2882 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2883 {
2884 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2885# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2886 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2887# else
2888 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2889 pVCpu->iem.s.abOpcode[offOpcode + 1],
2890 pVCpu->iem.s.abOpcode[offOpcode + 2],
2891 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2892# endif
2893 return VINF_SUCCESS;
2894 }
2895 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2896}
2897
2898#else  /* IEM_WITH_SETJMP */
2899
2900/**
2901 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2902 *
2903 * @returns The opcode dword.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 */
2906DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2907{
2908# ifdef IEM_WITH_CODE_TLB
2909 uint32_t u32;
2910 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2911 return u32;
2912# else
2913 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2914 if (rcStrict == VINF_SUCCESS)
2915 {
2916 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2917 pVCpu->iem.s.offOpcode = offOpcode + 4;
2918# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2919 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2920# else
2921 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2922 pVCpu->iem.s.abOpcode[offOpcode + 1],
2923 pVCpu->iem.s.abOpcode[offOpcode + 2],
2924 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2925# endif
2926 }
2927 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2928# endif
2929}
2930
2931
2932/**
2933 * Fetches the next opcode dword, longjmp on error.
2934 *
2935 * @returns The opcode dword.
2936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2937 */
2938DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2939{
2940# ifdef IEM_WITH_CODE_TLB
2941 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2942 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2943 if (RT_LIKELY( pbBuf != NULL
2944 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2945 {
2946 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2947# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2948 return *(uint32_t const *)&pbBuf[offBuf];
2949# else
2950 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2951 pbBuf[offBuf + 1],
2952 pbBuf[offBuf + 2],
2953 pbBuf[offBuf + 3]);
2954# endif
2955 }
2956# else
2957 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2958 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2959 {
2960 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2961# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2962 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2963# else
2964 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2965 pVCpu->iem.s.abOpcode[offOpcode + 1],
2966 pVCpu->iem.s.abOpcode[offOpcode + 2],
2967 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2968# endif
2969 }
2970# endif
2971 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2972}
2973
2974#endif /* IEM_WITH_SETJMP */
2975
2976
2977/**
2978 * Fetches the next opcode dword, returns automatically on failure.
2979 *
2980 * @param a_pu32 Where to return the opcode dword.
2981 * @remark Implicitly references pVCpu.
2982 */
2983#ifndef IEM_WITH_SETJMP
2984# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2985 do \
2986 { \
2987 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2988 if (rcStrict2 != VINF_SUCCESS) \
2989 return rcStrict2; \
2990 } while (0)
2991#else
2992# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2993#endif
2994
2995#ifndef IEM_WITH_SETJMP
2996
2997/**
2998 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3002 * @param pu64 Where to return the opcode dword.
3003 */
3004DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3005{
3006 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3007 if (rcStrict == VINF_SUCCESS)
3008 {
3009 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3010 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3011 pVCpu->iem.s.abOpcode[offOpcode + 1],
3012 pVCpu->iem.s.abOpcode[offOpcode + 2],
3013 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3014 pVCpu->iem.s.offOpcode = offOpcode + 4;
3015 }
3016 else
3017 *pu64 = 0;
3018 return rcStrict;
3019}
3020
3021
3022/**
3023 * Fetches the next opcode dword, zero extending it to a quad word.
3024 *
3025 * @returns Strict VBox status code.
3026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3027 * @param pu64 Where to return the opcode quad word.
3028 */
3029DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3030{
3031 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3032 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3033 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3034
3035 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3036 pVCpu->iem.s.abOpcode[offOpcode + 1],
3037 pVCpu->iem.s.abOpcode[offOpcode + 2],
3038 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3039 pVCpu->iem.s.offOpcode = offOpcode + 4;
3040 return VINF_SUCCESS;
3041}
3042
3043#endif /* !IEM_WITH_SETJMP */
3044
3045
3046/**
3047 * Fetches the next opcode dword and zero extends it to a quad word, returns
3048 * automatically on failure.
3049 *
3050 * @param a_pu64 Where to return the opcode quad word.
3051 * @remark Implicitly references pVCpu.
3052 */
3053#ifndef IEM_WITH_SETJMP
3054# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3055 do \
3056 { \
3057 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3058 if (rcStrict2 != VINF_SUCCESS) \
3059 return rcStrict2; \
3060 } while (0)
3061#else
3062# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3063#endif
3064
3065
3066#ifndef IEM_WITH_SETJMP
3067/**
3068 * Fetches the next signed double word from the opcode stream.
3069 *
3070 * @returns Strict VBox status code.
3071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3072 * @param pi32 Where to return the signed double word.
3073 */
3074DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3075{
3076 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3077}
3078#endif
3079
3080/**
3081 * Fetches the next signed double word from the opcode stream, returning
3082 * automatically on failure.
3083 *
3084 * @param a_pi32 Where to return the signed double word.
3085 * @remark Implicitly references pVCpu.
3086 */
3087#ifndef IEM_WITH_SETJMP
3088# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3089 do \
3090 { \
3091 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3092 if (rcStrict2 != VINF_SUCCESS) \
3093 return rcStrict2; \
3094 } while (0)
3095#else
3096# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3097#endif
3098
3099#ifndef IEM_WITH_SETJMP
3100
3101/**
3102 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3103 *
3104 * @returns Strict VBox status code.
3105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3106 * @param pu64 Where to return the opcode dword, sign extended to a quad word.
3107 */
3108DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3109{
3110 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3111 if (rcStrict == VINF_SUCCESS)
3112 {
3113 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
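        /* The cast to int32_t followed by the assignment to the 64-bit destination
           is what performs the sign extension here. */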
3114 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3115 pVCpu->iem.s.abOpcode[offOpcode + 1],
3116 pVCpu->iem.s.abOpcode[offOpcode + 2],
3117 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3118 pVCpu->iem.s.offOpcode = offOpcode + 4;
3119 }
3120 else
3121 *pu64 = 0;
3122 return rcStrict;
3123}
3124
3125
3126/**
3127 * Fetches the next opcode dword, sign extending it into a quad word.
3128 *
3129 * @returns Strict VBox status code.
3130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3131 * @param pu64 Where to return the opcode quad word.
3132 */
3133DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3134{
3135 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3136 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3137 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3138
3139 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3140 pVCpu->iem.s.abOpcode[offOpcode + 1],
3141 pVCpu->iem.s.abOpcode[offOpcode + 2],
3142 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3143 *pu64 = i32;
3144 pVCpu->iem.s.offOpcode = offOpcode + 4;
3145 return VINF_SUCCESS;
3146}
3147
3148#endif /* !IEM_WITH_SETJMP */
3149
3150
3151/**
3152 * Fetches the next opcode double word and sign extends it to a quad word,
3153 * returns automatically on failure.
3154 *
3155 * @param a_pu64 Where to return the opcode quad word.
3156 * @remark Implicitly references pVCpu.
3157 */
3158#ifndef IEM_WITH_SETJMP
3159# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3160 do \
3161 { \
3162 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3163 if (rcStrict2 != VINF_SUCCESS) \
3164 return rcStrict2; \
3165 } while (0)
3166#else
3167# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3168#endif
3169
3170#ifndef IEM_WITH_SETJMP
3171
3172/**
3173 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3174 *
3175 * @returns Strict VBox status code.
3176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3177 * @param pu64 Where to return the opcode qword.
3178 */
3179DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3180{
3181 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3182 if (rcStrict == VINF_SUCCESS)
3183 {
3184 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
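        /* Either read the qword directly (hosts that tolerate unaligned accesses) or
           assemble it byte by byte, which is alignment-safe and makes the little-endian
           opcode byte order explicit. */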
3185# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3186 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3187# else
3188 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3189 pVCpu->iem.s.abOpcode[offOpcode + 1],
3190 pVCpu->iem.s.abOpcode[offOpcode + 2],
3191 pVCpu->iem.s.abOpcode[offOpcode + 3],
3192 pVCpu->iem.s.abOpcode[offOpcode + 4],
3193 pVCpu->iem.s.abOpcode[offOpcode + 5],
3194 pVCpu->iem.s.abOpcode[offOpcode + 6],
3195 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3196# endif
3197 pVCpu->iem.s.offOpcode = offOpcode + 8;
3198 }
3199 else
3200 *pu64 = 0;
3201 return rcStrict;
3202}
3203
3204
3205/**
3206 * Fetches the next opcode qword.
3207 *
3208 * @returns Strict VBox status code.
3209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3210 * @param pu64 Where to return the opcode qword.
3211 */
3212DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3213{
3214 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3215 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3216 {
3217# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3218 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3219# else
3220 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3221 pVCpu->iem.s.abOpcode[offOpcode + 1],
3222 pVCpu->iem.s.abOpcode[offOpcode + 2],
3223 pVCpu->iem.s.abOpcode[offOpcode + 3],
3224 pVCpu->iem.s.abOpcode[offOpcode + 4],
3225 pVCpu->iem.s.abOpcode[offOpcode + 5],
3226 pVCpu->iem.s.abOpcode[offOpcode + 6],
3227 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3228# endif
3229 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3230 return VINF_SUCCESS;
3231 }
3232 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3233}
3234
3235#else /* IEM_WITH_SETJMP */
3236
3237/**
3238 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3239 *
3240 * @returns The opcode qword.
3241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3242 */
3243DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3244{
3245# ifdef IEM_WITH_CODE_TLB
3246 uint64_t u64;
3247 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3248 return u64;
3249# else
3250 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3251 if (rcStrict == VINF_SUCCESS)
3252 {
3253 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3254 pVCpu->iem.s.offOpcode = offOpcode + 8;
3255# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3256 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3257# else
3258 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3259 pVCpu->iem.s.abOpcode[offOpcode + 1],
3260 pVCpu->iem.s.abOpcode[offOpcode + 2],
3261 pVCpu->iem.s.abOpcode[offOpcode + 3],
3262 pVCpu->iem.s.abOpcode[offOpcode + 4],
3263 pVCpu->iem.s.abOpcode[offOpcode + 5],
3264 pVCpu->iem.s.abOpcode[offOpcode + 6],
3265 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3266# endif
3267 }
3268 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3269# endif
3270}
3271
3272
3273/**
3274 * Fetches the next opcode qword, longjmp on error.
3275 *
3276 * @returns The opcode qword.
3277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3278 */
3279DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3280{
3281# ifdef IEM_WITH_CODE_TLB
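    /* Fast path: with the code TLB enabled the opcode bytes come straight from the
       pre-translated instruction buffer; the slow path below refills the buffer and
       longjmps on fetch errors. */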
3282 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3283 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3284 if (RT_LIKELY( pbBuf != NULL
3285 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3286 {
3287 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3288# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3289 return *(uint64_t const *)&pbBuf[offBuf];
3290# else
3291 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3292 pbBuf[offBuf + 1],
3293 pbBuf[offBuf + 2],
3294 pbBuf[offBuf + 3],
3295 pbBuf[offBuf + 4],
3296 pbBuf[offBuf + 5],
3297 pbBuf[offBuf + 6],
3298 pbBuf[offBuf + 7]);
3299# endif
3300 }
3301# else
3302 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3303 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3304 {
3305 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3306# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3307 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3308# else
3309 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3310 pVCpu->iem.s.abOpcode[offOpcode + 1],
3311 pVCpu->iem.s.abOpcode[offOpcode + 2],
3312 pVCpu->iem.s.abOpcode[offOpcode + 3],
3313 pVCpu->iem.s.abOpcode[offOpcode + 4],
3314 pVCpu->iem.s.abOpcode[offOpcode + 5],
3315 pVCpu->iem.s.abOpcode[offOpcode + 6],
3316 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3317# endif
3318 }
3319# endif
3320 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3321}
3322
3323#endif /* IEM_WITH_SETJMP */
3324
3325/**
3326 * Fetches the next opcode quad word, returns automatically on failure.
3327 *
3328 * @param a_pu64 Where to return the opcode quad word.
3329 * @remark Implicitly references pVCpu.
3330 */
3331#ifndef IEM_WITH_SETJMP
3332# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3333 do \
3334 { \
3335 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3336 if (rcStrict2 != VINF_SUCCESS) \
3337 return rcStrict2; \
3338 } while (0)
3339#else
3340# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3341#endif
3342
3343
3344/** @name Misc Worker Functions.
3345 * @{
3346 */
3347
3348/**
3349 * Gets the exception class for the specified exception vector.
3350 *
3351 * @returns The class of the specified exception.
3352 * @param uVector The exception vector.
3353 */
3354IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3355{
3356 Assert(uVector <= X86_XCPT_LAST);
3357 switch (uVector)
3358 {
3359 case X86_XCPT_DE:
3360 case X86_XCPT_TS:
3361 case X86_XCPT_NP:
3362 case X86_XCPT_SS:
3363 case X86_XCPT_GP:
3364 case X86_XCPT_SX: /* AMD only */
3365 return IEMXCPTCLASS_CONTRIBUTORY;
3366
3367 case X86_XCPT_PF:
3368 case X86_XCPT_VE: /* Intel only */
3369 return IEMXCPTCLASS_PAGE_FAULT;
3370
3371 case X86_XCPT_DF:
3372 return IEMXCPTCLASS_DOUBLE_FAULT;
3373 }
3374 return IEMXCPTCLASS_BENIGN;
3375}
3376
3377
3378/**
3379 * Evaluates how to handle an exception caused during delivery of another event
3380 * (exception / interrupt).
3381 *
3382 * @returns How to handle the recursive exception.
3383 * @param pVCpu The cross context virtual CPU structure of the
3384 * calling thread.
3385 * @param fPrevFlags The flags of the previous event.
3386 * @param uPrevVector The vector of the previous event.
3387 * @param fCurFlags The flags of the current exception.
3388 * @param uCurVector The vector of the current exception.
3389 * @param pfXcptRaiseInfo Where to store additional information about the
3390 * exception condition. Optional.
3391 */
3392VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3393 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3394{
3395 /*
3396     * Only CPU exceptions can be raised while delivering other events; software interrupt
3397     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3398 */
3399 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3400 Assert(pVCpu); RT_NOREF(pVCpu);
3401 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3402
3403 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3404 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
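    /* Summary of the combinations handled below: benign + anything keeps the current
       exception; page-fault/contributory pairs escalate to #DF; a fault while delivering
       #DF escalates to a triple fault (cf. the "Conditions for Generating a Double Fault"
       table in the Intel SDM). */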
3405 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3406 {
3407 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3408 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3409 {
3410 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3411 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3412 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3413 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3414 {
3415 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3416 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3417 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3418 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3419 uCurVector, pVCpu->cpum.GstCtx.cr2));
3420 }
3421 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3422 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3423 {
3424 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3425 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3426 }
3427 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3428 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3429 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3430 {
3431 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3432 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3433 }
3434 }
3435 else
3436 {
3437 if (uPrevVector == X86_XCPT_NMI)
3438 {
3439 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3440 if (uCurVector == X86_XCPT_PF)
3441 {
3442 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3443 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3444 }
3445 }
3446 else if ( uPrevVector == X86_XCPT_AC
3447 && uCurVector == X86_XCPT_AC)
3448 {
3449 enmRaise = IEMXCPTRAISE_CPU_HANG;
3450 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3451 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3452 }
3453 }
3454 }
3455 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3456 {
3457 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3458 if (uCurVector == X86_XCPT_PF)
3459 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3460 }
3461 else
3462 {
3463 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3464 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3465 }
3466
3467 if (pfXcptRaiseInfo)
3468 *pfXcptRaiseInfo = fRaiseInfo;
3469 return enmRaise;
3470}
3471
3472
3473/**
3474 * Enters the CPU shutdown state initiated by a triple fault or other
3475 * unrecoverable conditions.
3476 *
3477 * @returns Strict VBox status code.
3478 * @param pVCpu The cross context virtual CPU structure of the
3479 * calling thread.
3480 */
3481IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3482{
3483 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3484 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3485
3486 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3487 {
3488 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3489 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3490 }
3491
3492 RT_NOREF(pVCpu);
3493 return VINF_EM_TRIPLE_FAULT;
3494}
3495
3496
3497/**
3498 * Validates a new SS segment.
3499 *
3500 * @returns VBox strict status code.
3501 * @param pVCpu The cross context virtual CPU structure of the
3502 * calling thread.
3503 * @param NewSS The new SS selector.
3504 * @param uCpl The CPL to load the stack for.
3505 * @param pDesc Where to return the descriptor.
3506 */
3507IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3508{
3509 /* Null selectors are not allowed (we're not called for dispatching
3510 interrupts with SS=0 in long mode). */
3511 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3512 {
3513 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3514 return iemRaiseTaskSwitchFault0(pVCpu);
3515 }
3516
3517 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3518 if ((NewSS & X86_SEL_RPL) != uCpl)
3519 {
3520 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3521 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3522 }
3523
3524 /*
3525 * Read the descriptor.
3526 */
3527 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3528 if (rcStrict != VINF_SUCCESS)
3529 return rcStrict;
3530
3531 /*
3532 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3533 */
3534 if (!pDesc->Legacy.Gen.u1DescType)
3535 {
3536 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3537 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3538 }
3539
3540 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3541 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3542 {
3543 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3544 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3545 }
3546 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3547 {
3548 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3549 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3550 }
3551
3552 /* Is it there? */
3553 /** @todo testcase: Is this checked before the canonical / limit check below? */
3554 if (!pDesc->Legacy.Gen.u1Present)
3555 {
3556 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3557 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3558 }
3559
3560 return VINF_SUCCESS;
3561}
3562
3563
3564/**
3565 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3566 * not.
3567 *
3568 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3569 */
3570#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3571# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3572#else
3573# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3574#endif
3575
3576/**
3577 * Updates the EFLAGS in the correct manner wrt. PATM.
3578 *
3579 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3580 * @param a_fEfl The new EFLAGS.
3581 */
3582#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3583# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3584#else
3585# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3586#endif
3587
3588
3589/** @} */
3590
3591/** @name Raising Exceptions.
3592 *
3593 * @{
3594 */
3595
3596
3597/**
3598 * Loads the specified stack far pointer from the TSS.
3599 *
3600 * @returns VBox strict status code.
3601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3602 * @param uCpl The CPL to load the stack for.
3603 * @param pSelSS Where to return the new stack segment.
3604 * @param puEsp Where to return the new stack pointer.
3605 */
3606IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3607{
3608 VBOXSTRICTRC rcStrict;
3609 Assert(uCpl < 4);
3610
3611 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3612 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3613 {
3614 /*
3615 * 16-bit TSS (X86TSS16).
3616 */
3617 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3618 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3619 {
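            /* In the 16-bit TSS the SP0/SS0 pair starts at offset 2 and each privilege
               level takes 4 bytes; the dword fetched below has SP in the low word and
               SS in the high word. */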
3620 uint32_t off = uCpl * 4 + 2;
3621 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3622 {
3623 /** @todo check actual access pattern here. */
3624 uint32_t u32Tmp = 0; /* gcc maybe... */
3625 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3626 if (rcStrict == VINF_SUCCESS)
3627 {
3628 *puEsp = RT_LOWORD(u32Tmp);
3629 *pSelSS = RT_HIWORD(u32Tmp);
3630 return VINF_SUCCESS;
3631 }
3632 }
3633 else
3634 {
3635 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3636 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3637 }
3638 break;
3639 }
3640
3641 /*
3642 * 32-bit TSS (X86TSS32).
3643 */
3644 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3645 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3646 {
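            /* In the 32-bit TSS the ESP0/SS0 pair starts at offset 4 and each privilege
               level takes 8 bytes; the qword fetched below has ESP in the low dword and
               SS in the word that follows. */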
3647 uint32_t off = uCpl * 8 + 4;
3648 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3649 {
3650/** @todo check actual access pattern here. */
3651 uint64_t u64Tmp;
3652 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3653 if (rcStrict == VINF_SUCCESS)
3654 {
3655 *puEsp = u64Tmp & UINT32_MAX;
3656 *pSelSS = (RTSEL)(u64Tmp >> 32);
3657 return VINF_SUCCESS;
3658 }
3659 }
3660 else
3661 {
3662                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3663 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3664 }
3665 break;
3666 }
3667
3668 default:
3669 AssertFailed();
3670 rcStrict = VERR_IEM_IPE_4;
3671 break;
3672 }
3673
3674 *puEsp = 0; /* make gcc happy */
3675 *pSelSS = 0; /* make gcc happy */
3676 return rcStrict;
3677}
3678
3679
3680/**
3681 * Loads the specified stack pointer from the 64-bit TSS.
3682 *
3683 * @returns VBox strict status code.
3684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3685 * @param uCpl The CPL to load the stack for.
3686 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3687 * @param puRsp Where to return the new stack pointer.
3688 */
3689IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3690{
3691 Assert(uCpl < 4);
3692 Assert(uIst < 8);
3693 *puRsp = 0; /* make gcc happy */
3694
3695 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3696 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3697
3698 uint32_t off;
3699 if (uIst)
3700 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3701 else
3702 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3703 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3704 {
3705 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3706 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3707 }
3708
3709 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3710}
3711
3712
3713/**
3714 * Adjust the CPU state according to the exception being raised.
3715 *
3716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3717 * @param u8Vector The exception that has been raised.
3718 */
3719DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3720{
3721 switch (u8Vector)
3722 {
3723 case X86_XCPT_DB:
3724 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3725 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3726 break;
3727 /** @todo Read the AMD and Intel exception reference... */
3728 }
3729}
3730
3731
3732/**
3733 * Implements exceptions and interrupts for real mode.
3734 *
3735 * @returns VBox strict status code.
3736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3737 * @param cbInstr The number of bytes to offset rIP by in the return
3738 * address.
3739 * @param u8Vector The interrupt / exception vector number.
3740 * @param fFlags The flags.
3741 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3742 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3743 */
3744IEM_STATIC VBOXSTRICTRC
3745iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3746 uint8_t cbInstr,
3747 uint8_t u8Vector,
3748 uint32_t fFlags,
3749 uint16_t uErr,
3750 uint64_t uCr2)
3751{
3752 NOREF(uErr); NOREF(uCr2);
3753 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3754
3755 /*
3756 * Read the IDT entry.
3757 */
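    /* A real-mode IDT entry is a 4-byte IP:CS far pointer; the check below ensures the
       last byte of entry u8Vector (at offset 4 * u8Vector + 3) lies within the IDT limit. */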
3758 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3759 {
3760 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3761 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3762 }
3763 RTFAR16 Idte;
3764 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3765 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3766 {
3767 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3768 return rcStrict;
3769 }
3770
3771 /*
3772 * Push the stack frame.
3773 */
3774 uint16_t *pu16Frame;
3775 uint64_t uNewRsp;
3776 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3777 if (rcStrict != VINF_SUCCESS)
3778 return rcStrict;
3779
3780 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3781#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3782 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
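    /* The 8086, V20 and 80186 targets read FLAGS bits 12..15 as set, so mirror that in
       the flags image we are about to push. */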
3783 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3784 fEfl |= UINT16_C(0xf000);
3785#endif
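    /* Build the 6-byte frame the way INT does: FLAGS, CS, then IP, leaving IP at the
       lowest address (the new SP). */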
3786 pu16Frame[2] = (uint16_t)fEfl;
3787 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3788 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3789 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3790 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3791 return rcStrict;
3792
3793 /*
3794 * Load the vector address into cs:ip and make exception specific state
3795 * adjustments.
3796 */
3797 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3798 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3799 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3800 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3801 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3802 pVCpu->cpum.GstCtx.rip = Idte.off;
3803 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3804 IEMMISC_SET_EFL(pVCpu, fEfl);
3805
3806 /** @todo do we actually do this in real mode? */
3807 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3808 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3809
3810 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3811}
3812
3813
3814/**
3815 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3816 *
3817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3818 * @param pSReg Pointer to the segment register.
3819 */
3820IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3821{
3822 pSReg->Sel = 0;
3823 pSReg->ValidSel = 0;
3824 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3825 {
3826         /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes. */
3827 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3828 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3829 }
3830 else
3831 {
3832 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3833 /** @todo check this on AMD-V */
3834 pSReg->u64Base = 0;
3835 pSReg->u32Limit = 0;
3836 }
3837}
3838
3839
3840/**
3841 * Loads a segment selector during a task switch in V8086 mode.
3842 *
3843 * @param pSReg Pointer to the segment register.
3844 * @param uSel The selector value to load.
3845 */
3846IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3847{
3848 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3849 pSReg->Sel = uSel;
3850 pSReg->ValidSel = uSel;
3851 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3852 pSReg->u64Base = uSel << 4;
3853 pSReg->u32Limit = 0xffff;
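    /* 0xf3 = present, DPL 3, code/data descriptor, type 3 (read/write data, accessed). */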
3854 pSReg->Attr.u = 0xf3;
3855}
3856
3857
3858/**
3859 * Loads a NULL data selector into a selector register, both the hidden and
3860 * visible parts, in protected mode.
3861 *
3862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3863 * @param pSReg Pointer to the segment register.
3864 * @param uRpl The RPL.
3865 */
3866IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3867{
3868     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3869 * data selector in protected mode. */
3870 pSReg->Sel = uRpl;
3871 pSReg->ValidSel = uRpl;
3872 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3873 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3874 {
3875 /* VT-x (Intel 3960x) observed doing something like this. */
3876 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3877 pSReg->u32Limit = UINT32_MAX;
3878 pSReg->u64Base = 0;
3879 }
3880 else
3881 {
3882 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3883 pSReg->u32Limit = 0;
3884 pSReg->u64Base = 0;
3885 }
3886}
3887
3888
3889/**
3890 * Loads a segment selector during a task switch in protected mode.
3891 *
3892 * In this task switch scenario, we would throw \#TS exceptions rather than
3893 * \#GPs.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param pSReg Pointer to the segment register.
3898 * @param uSel The new selector value.
3899 *
3900 * @remarks This does _not_ handle CS or SS.
3901 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3902 */
3903IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3904{
3905 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3906
3907 /* Null data selector. */
3908 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3909 {
3910 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3911 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3912 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3913 return VINF_SUCCESS;
3914 }
3915
3916 /* Fetch the descriptor. */
3917 IEMSELDESC Desc;
3918 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3919 if (rcStrict != VINF_SUCCESS)
3920 {
3921 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3922 VBOXSTRICTRC_VAL(rcStrict)));
3923 return rcStrict;
3924 }
3925
3926 /* Must be a data segment or readable code segment. */
3927 if ( !Desc.Legacy.Gen.u1DescType
3928 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3929 {
3930 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3931 Desc.Legacy.Gen.u4Type));
3932 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3933 }
3934
3935 /* Check privileges for data segments and non-conforming code segments. */
3936 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3937 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3938 {
3939 /* The RPL and the new CPL must be less than or equal to the DPL. */
3940 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3941 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3942 {
3943 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3944 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3945 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3946 }
3947 }
3948
3949 /* Is it there? */
3950 if (!Desc.Legacy.Gen.u1Present)
3951 {
3952 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3953 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3954 }
3955
3956 /* The base and limit. */
3957 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3958 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3959
3960 /*
3961 * Ok, everything checked out fine. Now set the accessed bit before
3962 * committing the result into the registers.
3963 */
3964 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3965 {
3966 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3967 if (rcStrict != VINF_SUCCESS)
3968 return rcStrict;
3969 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3970 }
3971
3972 /* Commit */
3973 pSReg->Sel = uSel;
3974 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3975 pSReg->u32Limit = cbLimit;
3976 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3977 pSReg->ValidSel = uSel;
3978 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3979 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3980 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3981
3982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3983 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3984 return VINF_SUCCESS;
3985}
3986
3987
3988/**
3989 * Performs a task switch.
3990 *
3991 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3992 * caller is responsible for performing the necessary checks (like DPL, TSS
3993 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3994 * reference for JMP, CALL, IRET.
3995 *
3996 * If the task switch is due to a software interrupt or hardware exception,
3997 * the caller is responsible for validating the TSS selector and descriptor. See
3998 * Intel Instruction reference for INT n.
3999 *
4000 * @returns VBox strict status code.
4001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4002 * @param enmTaskSwitch The cause of the task switch.
4003 * @param uNextEip The EIP effective after the task switch.
4004 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4005 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4006 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4007 * @param SelTSS The TSS selector of the new task.
4008 * @param pNewDescTSS Pointer to the new TSS descriptor.
4009 */
4010IEM_STATIC VBOXSTRICTRC
4011iemTaskSwitch(PVMCPU pVCpu,
4012 IEMTASKSWITCH enmTaskSwitch,
4013 uint32_t uNextEip,
4014 uint32_t fFlags,
4015 uint16_t uErr,
4016 uint64_t uCr2,
4017 RTSEL SelTSS,
4018 PIEMSELDESC pNewDescTSS)
4019{
4020 Assert(!IEM_IS_REAL_MODE(pVCpu));
4021 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4022 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4023
4024 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4025 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4026 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4027 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4028 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4029
4030 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4031 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4032
4033 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4034 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4035
4036 /* Update CR2 in case it's a page-fault. */
4037 /** @todo This should probably be done much earlier in IEM/PGM. See
4038 * @bugref{5653#c49}. */
4039 if (fFlags & IEM_XCPT_FLAGS_CR2)
4040 pVCpu->cpum.GstCtx.cr2 = uCr2;
4041
4042 /*
4043 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4044 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4045 */
4046 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4047 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4048 if (uNewTSSLimit < uNewTSSLimitMin)
4049 {
4050 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4051 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4052 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4053 }
4054
4055 /*
4056     * Task switches in VMX non-root mode always cause VM-exits.
4057 * The new TSS must have been read and validated (DPL, limits etc.) before a
4058 * task-switch VM-exit commences.
4059 *
4060     * See Intel spec. 25.4.2 "Treatment of Task Switches"
4061 */
4062 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4063 {
4064 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4065 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4066 }
4067
4068 /*
4069 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4070 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4071 */
4072 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4073 {
4074 uint32_t const uExitInfo1 = SelTSS;
4075 uint32_t uExitInfo2 = uErr;
4076 switch (enmTaskSwitch)
4077 {
4078 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4079 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4080 default: break;
4081 }
4082 if (fFlags & IEM_XCPT_FLAGS_ERR)
4083 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4084 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4085 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4086
4087 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4088 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4089 RT_NOREF2(uExitInfo1, uExitInfo2);
4090 }
4091
4092 /*
4093     * Check the current TSS limit. The last bytes written to the current TSS during the
4094     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4095 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4096 *
4097     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4098 * end up with smaller than "legal" TSS limits.
4099 */
4100 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4101 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4102 if (uCurTSSLimit < uCurTSSLimitMin)
4103 {
4104 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4105 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4106 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4107 }
4108
4109 /*
4110 * Verify that the new TSS can be accessed and map it. Map only the required contents
4111 * and not the entire TSS.
4112 */
4113 void *pvNewTSS;
4114 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4115 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4116 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4117 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4118 * not perform correct translation if this happens. See Intel spec. 7.2.1
4119 * "Task-State Segment" */
4120 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4121 if (rcStrict != VINF_SUCCESS)
4122 {
4123 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4124 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4125 return rcStrict;
4126 }
4127
4128 /*
4129 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4130 */
4131 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4132 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4133 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4134 {
4135 PX86DESC pDescCurTSS;
4136 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4137 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4138 if (rcStrict != VINF_SUCCESS)
4139 {
4140 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4141 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4142 return rcStrict;
4143 }
4144
4145 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4146 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4147 if (rcStrict != VINF_SUCCESS)
4148 {
4149 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4150 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4151 return rcStrict;
4152 }
4153
4154 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4155 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4156 {
4157 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4158 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4159 u32EFlags &= ~X86_EFL_NT;
4160 }
4161 }
4162
4163 /*
4164 * Save the CPU state into the current TSS.
4165 */
4166 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4167 if (GCPtrNewTSS == GCPtrCurTSS)
4168 {
4169 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4170 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4171 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4172 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4173 pVCpu->cpum.GstCtx.ldtr.Sel));
4174 }
4175 if (fIsNewTSS386)
4176 {
4177 /*
4178 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4179 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4180 */
4181 void *pvCurTSS32;
4182 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4183 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4184 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4185 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4186 if (rcStrict != VINF_SUCCESS)
4187 {
4188 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4189 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4190 return rcStrict;
4191 }
4192
4193         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4194 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4195 pCurTSS32->eip = uNextEip;
4196 pCurTSS32->eflags = u32EFlags;
4197 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4198 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4199 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4200 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4201 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4202 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4203 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4204 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4205 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4206 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4207 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4208 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4209 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4210 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4211
4212 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4213 if (rcStrict != VINF_SUCCESS)
4214 {
4215 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4216 VBOXSTRICTRC_VAL(rcStrict)));
4217 return rcStrict;
4218 }
4219 }
4220 else
4221 {
4222 /*
4223 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4224 */
4225 void *pvCurTSS16;
4226 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4227 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4228 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4229 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4230 if (rcStrict != VINF_SUCCESS)
4231 {
4232 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4233 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4234 return rcStrict;
4235 }
4236
4237         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4238 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4239 pCurTSS16->ip = uNextEip;
4240 pCurTSS16->flags = u32EFlags;
4241 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4242 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4243 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4244 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4245 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4246 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4247 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4248 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4249 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4250 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4251 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4252 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4253
4254 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4255 if (rcStrict != VINF_SUCCESS)
4256 {
4257 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4258 VBOXSTRICTRC_VAL(rcStrict)));
4259 return rcStrict;
4260 }
4261 }
4262
4263 /*
4264 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4265 */
4266 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4267 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4268 {
4269 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4270 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4271 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4272 }
4273
4274 /*
4275 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4276 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4277 */
4278 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4279 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4280 bool fNewDebugTrap;
4281 if (fIsNewTSS386)
4282 {
4283 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4284 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4285 uNewEip = pNewTSS32->eip;
4286 uNewEflags = pNewTSS32->eflags;
4287 uNewEax = pNewTSS32->eax;
4288 uNewEcx = pNewTSS32->ecx;
4289 uNewEdx = pNewTSS32->edx;
4290 uNewEbx = pNewTSS32->ebx;
4291 uNewEsp = pNewTSS32->esp;
4292 uNewEbp = pNewTSS32->ebp;
4293 uNewEsi = pNewTSS32->esi;
4294 uNewEdi = pNewTSS32->edi;
4295 uNewES = pNewTSS32->es;
4296 uNewCS = pNewTSS32->cs;
4297 uNewSS = pNewTSS32->ss;
4298 uNewDS = pNewTSS32->ds;
4299 uNewFS = pNewTSS32->fs;
4300 uNewGS = pNewTSS32->gs;
4301 uNewLdt = pNewTSS32->selLdt;
4302 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4303 }
4304 else
4305 {
4306 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4307 uNewCr3 = 0;
4308 uNewEip = pNewTSS16->ip;
4309 uNewEflags = pNewTSS16->flags;
4310 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4311 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4312 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4313 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4314 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4315 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4316 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4317 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4318 uNewES = pNewTSS16->es;
4319 uNewCS = pNewTSS16->cs;
4320 uNewSS = pNewTSS16->ss;
4321 uNewDS = pNewTSS16->ds;
4322 uNewFS = 0;
4323 uNewGS = 0;
4324 uNewLdt = pNewTSS16->selLdt;
4325 fNewDebugTrap = false;
4326 }
4327
4328 if (GCPtrNewTSS == GCPtrCurTSS)
4329 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4330 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4331
4332 /*
4333 * We're done accessing the new TSS.
4334 */
4335 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4336 if (rcStrict != VINF_SUCCESS)
4337 {
4338 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4339 return rcStrict;
4340 }
4341
4342 /*
4343 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4344 */
4345 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4346 {
4347 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4348 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4349 if (rcStrict != VINF_SUCCESS)
4350 {
4351 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4352 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4353 return rcStrict;
4354 }
4355
4356 /* Check that the descriptor indicates the new TSS is available (not busy). */
4357 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4358 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4359 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4360
4361 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4362 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4363 if (rcStrict != VINF_SUCCESS)
4364 {
4365 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4366 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4367 return rcStrict;
4368 }
4369 }
4370
4371 /*
4372 * From this point on, we're technically in the new task. We will defer exceptions
4373 * until the completion of the task switch but before executing any instructions in the new task.
4374 */
4375 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4376 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4377 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4378 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4379 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4380 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4381 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4382
4383 /* Set the busy bit in TR. */
4384 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4385 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4386 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4387 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4388 {
4389 uNewEflags |= X86_EFL_NT;
4390 }
4391
4392 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4393 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4394 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4395
4396 pVCpu->cpum.GstCtx.eip = uNewEip;
4397 pVCpu->cpum.GstCtx.eax = uNewEax;
4398 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4399 pVCpu->cpum.GstCtx.edx = uNewEdx;
4400 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4401 pVCpu->cpum.GstCtx.esp = uNewEsp;
4402 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4403 pVCpu->cpum.GstCtx.esi = uNewEsi;
4404 pVCpu->cpum.GstCtx.edi = uNewEdi;
4405
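    /* Keep only the architecturally modifiable flags and force the reserved always-one
       bit (bit 1) back in. */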
4406 uNewEflags &= X86_EFL_LIVE_MASK;
4407 uNewEflags |= X86_EFL_RA1_MASK;
4408 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4409
4410 /*
4411 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4412 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4413 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4414 */
4415 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4416 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4417
4418 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4419 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4420
4421 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4422 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4423
4424 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4425 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4426
4427 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4428 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4429
4430 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4431 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4432 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4433
4434 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4435 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4436 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4437 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4438
4439 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4440 {
4441 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4442 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4443 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4444 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4445 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4446 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4447 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4448 }
4449
4450 /*
4451 * Switch CR3 for the new task.
4452 */
4453 if ( fIsNewTSS386
4454 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4455 {
4456 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4457 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4458 AssertRCSuccessReturn(rc, rc);
4459
4460 /* Inform PGM. */
4461 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4462 AssertRCReturn(rc, rc);
4463 /* ignore informational status codes */
4464
4465 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4466 }
4467
4468 /*
4469 * Switch LDTR for the new task.
4470 */
4471 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4472 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4473 else
4474 {
4475 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4476
4477 IEMSELDESC DescNewLdt;
4478 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4479 if (rcStrict != VINF_SUCCESS)
4480 {
4481 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4482 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4483 return rcStrict;
4484 }
4485 if ( !DescNewLdt.Legacy.Gen.u1Present
4486 || DescNewLdt.Legacy.Gen.u1DescType
4487 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4488 {
4489 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4490 uNewLdt, DescNewLdt.Legacy.u));
4491 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4492 }
4493
4494 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4495 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4496 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4497 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4498 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4499 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4500 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4501 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4502 }
4503
4504 IEMSELDESC DescSS;
4505 if (IEM_IS_V86_MODE(pVCpu))
4506 {
4507 pVCpu->iem.s.uCpl = 3;
4508 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4509 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4510 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4511 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4512 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4513 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4514
4515 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4516 DescSS.Legacy.u = 0;
4517 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4518 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4519 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4520 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4521 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4522 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4523 DescSS.Legacy.Gen.u2Dpl = 3;
4524 }
4525 else
4526 {
4527 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4528
4529 /*
4530 * Load the stack segment for the new task.
4531 */
4532 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4533 {
4534 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4535 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4536 }
4537
4538 /* Fetch the descriptor. */
4539 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4540 if (rcStrict != VINF_SUCCESS)
4541 {
4542 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4543 VBOXSTRICTRC_VAL(rcStrict)));
4544 return rcStrict;
4545 }
4546
4547 /* SS must be a data segment and writable. */
4548 if ( !DescSS.Legacy.Gen.u1DescType
4549 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4550 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4551 {
4552 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4553 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4554 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4555 }
4556
4557 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4558 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4559 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4560 {
4561 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4562 uNewCpl));
4563 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4564 }
4565
4566 /* Is it there? */
4567 if (!DescSS.Legacy.Gen.u1Present)
4568 {
4569 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4570 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4571 }
4572
4573 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4574 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4575
4576 /* Set the accessed bit before committing the result into SS. */
4577 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4578 {
4579 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4580 if (rcStrict != VINF_SUCCESS)
4581 return rcStrict;
4582 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4583 }
4584
4585 /* Commit SS. */
4586 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4587 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4588 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4589 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4590 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4591 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4592 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4593
4594 /* CPL has changed, update IEM before loading rest of segments. */
4595 pVCpu->iem.s.uCpl = uNewCpl;
4596
4597 /*
4598 * Load the data segments for the new task.
4599 */
4600 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4601 if (rcStrict != VINF_SUCCESS)
4602 return rcStrict;
4603 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4604 if (rcStrict != VINF_SUCCESS)
4605 return rcStrict;
4606 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4607 if (rcStrict != VINF_SUCCESS)
4608 return rcStrict;
4609 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4610 if (rcStrict != VINF_SUCCESS)
4611 return rcStrict;
4612
4613 /*
4614 * Load the code segment for the new task.
4615 */
4616 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4617 {
4618 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4619 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4620 }
4621
4622 /* Fetch the descriptor. */
4623 IEMSELDESC DescCS;
4624 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4625 if (rcStrict != VINF_SUCCESS)
4626 {
4627 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4628 return rcStrict;
4629 }
4630
4631 /* CS must be a code segment. */
4632 if ( !DescCS.Legacy.Gen.u1DescType
4633 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4634 {
4635 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4636 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4637 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4638 }
4639
4640 /* For conforming CS, DPL must be less than or equal to the RPL. */
4641 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4642 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4643 {
4644             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4645 DescCS.Legacy.Gen.u2Dpl));
4646 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4647 }
4648
4649 /* For non-conforming CS, DPL must match RPL. */
4650 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4651 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4652 {
4653             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4654 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4655 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4656 }
4657
4658 /* Is it there? */
4659 if (!DescCS.Legacy.Gen.u1Present)
4660 {
4661 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4662 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4663 }
4664
4665 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4666 u64Base = X86DESC_BASE(&DescCS.Legacy);
4667
4668 /* Set the accessed bit before committing the result into CS. */
4669 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4670 {
4671 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4672 if (rcStrict != VINF_SUCCESS)
4673 return rcStrict;
4674 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4675 }
4676
4677 /* Commit CS. */
4678 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4679 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4680 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4681 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4682 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4683 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4684 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4685 }
4686
4687 /** @todo Debug trap. */
4688 if (fIsNewTSS386 && fNewDebugTrap)
4689 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4690
4691 /*
4692 * Construct the error code masks based on what caused this task switch.
4693 * See Intel Instruction reference for INT.
4694 */
4695 uint16_t uExt;
4696 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4697 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4698 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4699 {
4700 uExt = 1;
4701 }
4702 else
4703 uExt = 0;
4704
4705 /*
4706 * Push any error code on to the new stack.
4707 */
4708 if (fFlags & IEM_XCPT_FLAGS_ERR)
4709 {
4710 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4711 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4712 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4713
4714 /* Check that there is sufficient space on the stack. */
4715 /** @todo Factor out segment limit checking for normal/expand down segments
4716 * into a separate function. */
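        /* Note: for a normal (expand-up) segment valid offsets run from 0 up to the limit;
           for an expand-down segment they run from limit+1 up to 0xffff (or 0xffffffff
           when D/B=1), which is what the two branches below check. */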
4717 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4718 {
4719 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4720 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4721 {
4722 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4723 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4724 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4725 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4726 }
4727 }
4728 else
4729 {
4730 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4731 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4732 {
4733 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4734 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4735 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4736 }
4737 }
4738
4739
4740 if (fIsNewTSS386)
4741 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4742 else
4743 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4744 if (rcStrict != VINF_SUCCESS)
4745 {
4746 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4747 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4748 return rcStrict;
4749 }
4750 }
4751
4752 /* Check the new EIP against the new CS limit. */
4753 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4754 {
4755         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4756              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4757 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4758 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4759 }
4760
4761 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4762 pVCpu->cpum.GstCtx.ss.Sel));
4763 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4764}
4765
4766
4767/**
4768 * Implements exceptions and interrupts for protected mode.
4769 *
4770 * @returns VBox strict status code.
4771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4772 * @param cbInstr The number of bytes to offset rIP by in the return
4773 * address.
4774 * @param u8Vector The interrupt / exception vector number.
4775 * @param fFlags The flags.
4776 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4777 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4778 */
4779IEM_STATIC VBOXSTRICTRC
4780iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4781 uint8_t cbInstr,
4782 uint8_t u8Vector,
4783 uint32_t fFlags,
4784 uint16_t uErr,
4785 uint64_t uCr2)
4786{
4787 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4788
4789 /*
4790 * Read the IDT entry.
4791 */
4792 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4793 {
4794 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4795 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4796 }
4797 X86DESC Idte;
4798 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4799 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4800 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4801 {
4802 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4803 return rcStrict;
4804 }
4805 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4806 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4807 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4808
4809 /*
4810 * Check the descriptor type, DPL and such.
4811 * ASSUMES this is done in the same order as described for call-gate calls.
4812 */
4813 if (Idte.Gate.u1DescType)
4814 {
4815 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4816 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4817 }
4818 bool fTaskGate = false;
4819 uint8_t f32BitGate = true;
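    /* Note: f32BitGate is also used as a shift count further down, so the stack-frame
       size doubles for 32-bit gates (4-byte entries instead of 2-byte ones). */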
4820 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4821 switch (Idte.Gate.u4Type)
4822 {
4823 case X86_SEL_TYPE_SYS_UNDEFINED:
4824 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4825 case X86_SEL_TYPE_SYS_LDT:
4826 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4827 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4828 case X86_SEL_TYPE_SYS_UNDEFINED2:
4829 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4830 case X86_SEL_TYPE_SYS_UNDEFINED3:
4831 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4832 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4833 case X86_SEL_TYPE_SYS_UNDEFINED4:
4834 {
4835 /** @todo check what actually happens when the type is wrong...
4836 * esp. call gates. */
4837 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4838 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4839 }
4840
4841 case X86_SEL_TYPE_SYS_286_INT_GATE:
4842 f32BitGate = false;
4843 RT_FALL_THRU();
4844 case X86_SEL_TYPE_SYS_386_INT_GATE:
4845 fEflToClear |= X86_EFL_IF;
4846 break;
4847
4848 case X86_SEL_TYPE_SYS_TASK_GATE:
4849 fTaskGate = true;
4850#ifndef IEM_IMPLEMENTS_TASKSWITCH
4851 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4852#endif
4853 break;
4854
4855 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4856 f32BitGate = false;
4857 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4858 break;
4859
4860 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4861 }
4862
4863 /* Check DPL against CPL if applicable. */
4864 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4865 {
4866 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4869 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4870 }
4871 }
4872
4873 /* Is it there? */
4874 if (!Idte.Gate.u1Present)
4875 {
4876 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4877 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4878 }
4879
4880 /* Is it a task-gate? */
4881 if (fTaskGate)
4882 {
4883 /*
4884 * Construct the error code masks based on what caused this task switch.
4885 * See Intel Instruction reference for INT.
4886 */
4887 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4888 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4889 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4890 RTSEL SelTSS = Idte.Gate.u16Sel;
4891
4892 /*
4893 * Fetch the TSS descriptor in the GDT.
4894 */
4895 IEMSELDESC DescTSS;
4896 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4897 if (rcStrict != VINF_SUCCESS)
4898 {
4899 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4900 VBOXSTRICTRC_VAL(rcStrict)));
4901 return rcStrict;
4902 }
4903
4904 /* The TSS descriptor must be a system segment and be available (not busy). */
4905 if ( DescTSS.Legacy.Gen.u1DescType
4906 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4907 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4908 {
4909 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4910 u8Vector, SelTSS, DescTSS.Legacy.au64));
4911 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4912 }
4913
4914 /* The TSS must be present. */
4915 if (!DescTSS.Legacy.Gen.u1Present)
4916 {
4917 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4918 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4919 }
4920
4921 /* Do the actual task switch. */
4922 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4923 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4924 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4925 }
4926
4927 /* A null CS is bad. */
4928 RTSEL NewCS = Idte.Gate.u16Sel;
4929 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4930 {
4931 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4932 return iemRaiseGeneralProtectionFault0(pVCpu);
4933 }
4934
4935 /* Fetch the descriptor for the new CS. */
4936 IEMSELDESC DescCS;
4937 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4938 if (rcStrict != VINF_SUCCESS)
4939 {
4940 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4941 return rcStrict;
4942 }
4943
4944 /* Must be a code segment. */
4945 if (!DescCS.Legacy.Gen.u1DescType)
4946 {
4947 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4948 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4949 }
4950 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4951 {
4952 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4953 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4954 }
4955
4956 /* Don't allow lowering the privilege level. */
4957 /** @todo Does the lowering of privileges apply to software interrupts
4958 * only? This has bearings on the more-privileged or
4959 * same-privilege stack behavior further down. A testcase would
4960 * be nice. */
4961 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4962 {
4963 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4964 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4965 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4966 }
4967
4968 /* Make sure the selector is present. */
4969 if (!DescCS.Legacy.Gen.u1Present)
4970 {
4971 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4972 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4973 }
4974
4975 /* Check the new EIP against the new CS limit. */
4976 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4977 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4978 ? Idte.Gate.u16OffsetLow
4979 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4980 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4981 if (uNewEip > cbLimitCS)
4982 {
4983 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4984 u8Vector, uNewEip, cbLimitCS, NewCS));
4985 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4986 }
4987 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4988
4989 /* Calc the flag image to push. */
4990 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4991 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4992 fEfl &= ~X86_EFL_RF;
4993 else
4994 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4995
4996 /* From V8086 mode only go to CPL 0. */
4997 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4998 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
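    /* (A conforming CS keeps the current CPL; a non-conforming CS runs the handler at CS.DPL.) */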
4999 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5000 {
5001 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5002 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5003 }
5004
5005 /*
5006 * If the privilege level changes, we need to get a new stack from the TSS.
5007      * This in turn means validating the new SS and ESP...
5008 */
5009 if (uNewCpl != pVCpu->iem.s.uCpl)
5010 {
5011 RTSEL NewSS;
5012 uint32_t uNewEsp;
5013 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5014 if (rcStrict != VINF_SUCCESS)
5015 return rcStrict;
5016
5017 IEMSELDESC DescSS;
5018 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5022 if (!DescSS.Legacy.Gen.u1DefBig)
5023 {
5024 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5025 uNewEsp = (uint16_t)uNewEsp;
5026 }
5027
5028 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5029
5030 /* Check that there is sufficient space for the stack frame. */
5031 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5032 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5033 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5034 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
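        /* Frame: [error code,] IP, CS, FLAGS, SP, SS - plus ES, DS, FS and GS when interrupting
           V8086 code; 2 bytes per entry for a 16-bit gate, 4 bytes for a 32-bit gate. */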
5035
5036 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5037 {
5038 if ( uNewEsp - 1 > cbLimitSS
5039 || uNewEsp < cbStackFrame)
5040 {
5041 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5042 u8Vector, NewSS, uNewEsp, cbStackFrame));
5043 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5044 }
5045 }
5046 else
5047 {
5048 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5049 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5050 {
5051 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5052 u8Vector, NewSS, uNewEsp, cbStackFrame));
5053 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5054 }
5055 }
5056
5057 /*
5058 * Start making changes.
5059 */
5060
5061 /* Set the new CPL so that stack accesses use it. */
5062 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5063 pVCpu->iem.s.uCpl = uNewCpl;
5064
5065 /* Create the stack frame. */
5066 RTPTRUNION uStackFrame;
5067 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5068 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5069 if (rcStrict != VINF_SUCCESS)
5070 return rcStrict;
5071 void * const pvStackFrame = uStackFrame.pv;
5072 if (f32BitGate)
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu32++ = uErr;
5076 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5078 uStackFrame.pu32[2] = fEfl;
5079 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5080 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5081 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5082 if (fEfl & X86_EFL_VM)
5083 {
5084 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5085 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5086 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5087 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5088 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5089 }
5090 }
5091 else
5092 {
5093 if (fFlags & IEM_XCPT_FLAGS_ERR)
5094 *uStackFrame.pu16++ = uErr;
5095 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5096 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5097 uStackFrame.pu16[2] = fEfl;
5098 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5099 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5100 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5101 if (fEfl & X86_EFL_VM)
5102 {
5103 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5104 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5105 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5106 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5107 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5108 }
5109 }
5110 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5111 if (rcStrict != VINF_SUCCESS)
5112 return rcStrict;
5113
5114 /* Mark the selectors 'accessed' (hope this is the correct time). */
5115         /** @todo testcase: exactly _when_ are the accessed bits set - before or
5116 * after pushing the stack frame? (Write protect the gdt + stack to
5117 * find out.) */
5118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5119 {
5120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5124 }
5125
5126 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5127 {
5128 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5129 if (rcStrict != VINF_SUCCESS)
5130 return rcStrict;
5131 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5132 }
5133
5134 /*
5135          * Start committing the register changes (joins with the DPL=CPL branch).
5136 */
5137 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5138 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5139 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5140 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5141 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5142 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5143 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5144 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5145 * SP is loaded).
5146 * Need to check the other combinations too:
5147 * - 16-bit TSS, 32-bit handler
5148 * - 32-bit TSS, 16-bit handler */
5149 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5150 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5151 else
5152 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5153
5154 if (fEfl & X86_EFL_VM)
5155 {
5156 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5157 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5158 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5159 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5160 }
5161 }
5162 /*
5163 * Same privilege, no stack change and smaller stack frame.
5164 */
5165 else
5166 {
5167 uint64_t uNewRsp;
5168 RTPTRUNION uStackFrame;
5169 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
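        /* Frame: [error code,] IP, CS, FLAGS; 2 bytes per entry for a 16-bit gate, 4 bytes for a 32-bit gate. */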
5170 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5171 if (rcStrict != VINF_SUCCESS)
5172 return rcStrict;
5173 void * const pvStackFrame = uStackFrame.pv;
5174
5175 if (f32BitGate)
5176 {
5177 if (fFlags & IEM_XCPT_FLAGS_ERR)
5178 *uStackFrame.pu32++ = uErr;
5179 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5180 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5181 uStackFrame.pu32[2] = fEfl;
5182 }
5183 else
5184 {
5185 if (fFlags & IEM_XCPT_FLAGS_ERR)
5186 *uStackFrame.pu16++ = uErr;
5187 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5188 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5189 uStackFrame.pu16[2] = fEfl;
5190 }
5191 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5192 if (rcStrict != VINF_SUCCESS)
5193 return rcStrict;
5194
5195 /* Mark the CS selector as 'accessed'. */
5196 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5197 {
5198 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5199 if (rcStrict != VINF_SUCCESS)
5200 return rcStrict;
5201 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5202 }
5203
5204 /*
5205 * Start committing the register changes (joins with the other branch).
5206 */
5207 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5208 }
5209
5210 /* ... register committing continues. */
5211 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5212 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5213 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5214 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5215 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5216 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5217
5218 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5219 fEfl &= ~fEflToClear;
5220 IEMMISC_SET_EFL(pVCpu, fEfl);
5221
5222 if (fFlags & IEM_XCPT_FLAGS_CR2)
5223 pVCpu->cpum.GstCtx.cr2 = uCr2;
5224
5225 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5226 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5227
5228 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5229}
5230
5231
5232/**
5233 * Implements exceptions and interrupts for long mode.
5234 *
5235 * @returns VBox strict status code.
5236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5237 * @param cbInstr The number of bytes to offset rIP by in the return
5238 * address.
5239 * @param u8Vector The interrupt / exception vector number.
5240 * @param fFlags The flags.
5241 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5242 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5243 */
5244IEM_STATIC VBOXSTRICTRC
5245iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5246 uint8_t cbInstr,
5247 uint8_t u8Vector,
5248 uint32_t fFlags,
5249 uint16_t uErr,
5250 uint64_t uCr2)
5251{
5252 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5253
5254 /*
5255 * Read the IDT entry.
5256 */
5257 uint16_t offIdt = (uint16_t)u8Vector << 4;
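    /* In long mode each IDT entry is 16 bytes, hence vector * 16 and the two 8-byte fetches below. */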
5258 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5259 {
5260 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5261 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5262 }
5263 X86DESC64 Idte;
5264 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5265 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5266 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5267 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5268 {
5269 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5270 return rcStrict;
5271 }
5272 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5273 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5274 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5275
5276 /*
5277 * Check the descriptor type, DPL and such.
5278 * ASSUMES this is done in the same order as described for call-gate calls.
5279 */
5280 if (Idte.Gate.u1DescType)
5281 {
5282 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5283 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5284 }
5285 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5286 switch (Idte.Gate.u4Type)
5287 {
5288 case AMD64_SEL_TYPE_SYS_INT_GATE:
5289 fEflToClear |= X86_EFL_IF;
5290 break;
5291 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5292 break;
5293
5294 default:
5295 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5296 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5297 }
5298
5299 /* Check DPL against CPL if applicable. */
5300 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5301 {
5302 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5303 {
5304 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5305 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5306 }
5307 }
5308
5309 /* Is it there? */
5310 if (!Idte.Gate.u1Present)
5311 {
5312 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5313 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5314 }
5315
5316 /* A null CS is bad. */
5317 RTSEL NewCS = Idte.Gate.u16Sel;
5318 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5319 {
5320 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5321 return iemRaiseGeneralProtectionFault0(pVCpu);
5322 }
5323
5324 /* Fetch the descriptor for the new CS. */
5325 IEMSELDESC DescCS;
5326 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5327 if (rcStrict != VINF_SUCCESS)
5328 {
5329 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5330 return rcStrict;
5331 }
5332
5333 /* Must be a 64-bit code segment. */
5334 if (!DescCS.Long.Gen.u1DescType)
5335 {
5336 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5337 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5338 }
5339 if ( !DescCS.Long.Gen.u1Long
5340 || DescCS.Long.Gen.u1DefBig
5341 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5342 {
5343 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5344 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5345 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5346 }
5347
5348 /* Don't allow lowering the privilege level. For non-conforming CS
5349 selectors, the CS.DPL sets the privilege level the trap/interrupt
5350 handler runs at. For conforming CS selectors, the CPL remains
5351 unchanged, but the CS.DPL must be <= CPL. */
5352 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5353 * when CPU in Ring-0. Result \#GP? */
5354 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5355 {
5356 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5357 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5358 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5359 }
5360
5361
5362 /* Make sure the selector is present. */
5363 if (!DescCS.Legacy.Gen.u1Present)
5364 {
5365 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5366 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5367 }
5368
5369 /* Check that the new RIP is canonical. */
5370 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5371 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5372 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5373 if (!IEM_IS_CANONICAL(uNewRip))
5374 {
5375 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5376 return iemRaiseGeneralProtectionFault0(pVCpu);
5377 }
5378
5379 /*
5380 * If the privilege level changes or if the IST isn't zero, we need to get
5381 * a new stack from the TSS.
5382 */
5383 uint64_t uNewRsp;
5384 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5385 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5386 if ( uNewCpl != pVCpu->iem.s.uCpl
5387 || Idte.Gate.u3IST != 0)
5388 {
5389 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5390 if (rcStrict != VINF_SUCCESS)
5391 return rcStrict;
5392 }
5393 else
5394 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5395 uNewRsp &= ~(uint64_t)0xf;
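    /* In 64-bit mode the CPU aligns the new stack pointer to a 16-byte boundary before pushing the frame. */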
5396
5397 /*
5398 * Calc the flag image to push.
5399 */
5400 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5401 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5402 fEfl &= ~X86_EFL_RF;
5403 else
5404 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5405
5406 /*
5407 * Start making changes.
5408 */
5409 /* Set the new CPL so that stack accesses use it. */
5410 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5411 pVCpu->iem.s.uCpl = uNewCpl;
5412
5413 /* Create the stack frame. */
5414 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
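    /* Frame: [error code,] RIP, CS, RFLAGS, RSP and SS, each pushed as a 64-bit value (5 or 6 qwords). */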
5415 RTPTRUNION uStackFrame;
5416 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5417 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5418 if (rcStrict != VINF_SUCCESS)
5419 return rcStrict;
5420 void * const pvStackFrame = uStackFrame.pv;
5421
5422 if (fFlags & IEM_XCPT_FLAGS_ERR)
5423 *uStackFrame.pu64++ = uErr;
5424 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5425 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5426 uStackFrame.pu64[2] = fEfl;
5427 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5428 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5429 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5430 if (rcStrict != VINF_SUCCESS)
5431 return rcStrict;
5432
5433     /* Mark the CS selector 'accessed' (hope this is the correct time). */
5434     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5435 * after pushing the stack frame? (Write protect the gdt + stack to
5436 * find out.) */
5437 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5438 {
5439 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5440 if (rcStrict != VINF_SUCCESS)
5441 return rcStrict;
5442 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5443 }
5444
5445 /*
5446      * Start committing the register changes.
5447 */
5448 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5449 * hidden registers when interrupting 32-bit or 16-bit code! */
5450 if (uNewCpl != uOldCpl)
5451 {
5452 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5453 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5454 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5455 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5456 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5457 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5458 }
5459 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5460 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5461 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5462 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5463 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5464 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5465 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5466 pVCpu->cpum.GstCtx.rip = uNewRip;
5467
5468 fEfl &= ~fEflToClear;
5469 IEMMISC_SET_EFL(pVCpu, fEfl);
5470
5471 if (fFlags & IEM_XCPT_FLAGS_CR2)
5472 pVCpu->cpum.GstCtx.cr2 = uCr2;
5473
5474 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5475 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5476
5477 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5478}
5479
5480
5481/**
5482 * Implements exceptions and interrupts.
5483 *
5484  * All exceptions and interrupts go through this function!
5485 *
5486 * @returns VBox strict status code.
5487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5488 * @param cbInstr The number of bytes to offset rIP by in the return
5489 * address.
5490 * @param u8Vector The interrupt / exception vector number.
5491 * @param fFlags The flags.
5492 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5493 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5494 */
5495DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5496iemRaiseXcptOrInt(PVMCPU pVCpu,
5497 uint8_t cbInstr,
5498 uint8_t u8Vector,
5499 uint32_t fFlags,
5500 uint16_t uErr,
5501 uint64_t uCr2)
5502{
5503 /*
5504 * Get all the state that we might need here.
5505 */
5506 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5508
5509#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5510 /*
5511 * Flush prefetch buffer
5512 */
5513 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5514#endif
5515
5516 /*
5517 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5518 */
5519 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5520 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5521 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5522 | IEM_XCPT_FLAGS_BP_INSTR
5523 | IEM_XCPT_FLAGS_ICEBP_INSTR
5524 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5525 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5526 {
5527 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5528 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5529 u8Vector = X86_XCPT_GP;
5530 uErr = 0;
5531 }
5532#ifdef DBGFTRACE_ENABLED
5533 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5534 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5535 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5536#endif
5537
5538 /*
5539 * Evaluate whether NMI blocking should be in effect.
5540 * Normally, NMI blocking is in effect whenever we inject an NMI.
5541 */
5542 bool fBlockNmi;
5543 if ( u8Vector == X86_XCPT_NMI
5544 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5545 fBlockNmi = true;
5546 else
5547 fBlockNmi = false;
5548
5549#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5550 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5551 {
5552 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5553 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5554 return rcStrict0;
5555
5556 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5557 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5558 {
5559 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5560 fBlockNmi = false;
5561 }
5562 }
5563#endif
5564
5565#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5566 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5567 {
5568 /*
5569 * If the event is being injected as part of VMRUN, it isn't subject to event
5570 * intercepts in the nested-guest. However, secondary exceptions that occur
5571 * during injection of any event -are- subject to exception intercepts.
5572 *
5573 * See AMD spec. 15.20 "Event Injection".
5574 */
5575 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5576 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5577 else
5578 {
5579 /*
5580 * Check and handle if the event being raised is intercepted.
5581 */
5582 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5583 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5584 return rcStrict0;
5585 }
5586 }
5587#endif
5588
5589 /*
5590 * Set NMI blocking if necessary.
5591 */
5592 if ( fBlockNmi
5593 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5594 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5595
5596 /*
5597 * Do recursion accounting.
5598 */
5599 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5600 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5601 if (pVCpu->iem.s.cXcptRecursions == 0)
5602 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5603 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5604 else
5605 {
5606 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5607 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5608 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5609
5610 if (pVCpu->iem.s.cXcptRecursions >= 4)
5611 {
5612#ifdef DEBUG_bird
5613 AssertFailed();
5614#endif
5615 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5616 }
5617
5618 /*
5619 * Evaluate the sequence of recurring events.
5620 */
5621 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5622 NULL /* pXcptRaiseInfo */);
5623 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5624 { /* likely */ }
5625 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5626 {
5627 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5628 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5629 u8Vector = X86_XCPT_DF;
5630 uErr = 0;
5631#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5632 /* VMX nested-guest #DF intercept needs to be checked here. */
5633 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5634 {
5635 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5636 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5637 return rcStrict0;
5638 }
5639#endif
5640 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5641 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5642 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5643 }
5644 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5645 {
5646 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5647 return iemInitiateCpuShutdown(pVCpu);
5648 }
5649 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5650 {
5651 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5652 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5653 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5654 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5655 return VERR_EM_GUEST_CPU_HANG;
5656 }
5657 else
5658 {
5659 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5660 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5661 return VERR_IEM_IPE_9;
5662 }
5663
5664 /*
5665          * The 'EXT' bit is set when an exception occurs during delivery of an external
5666          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5667          * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by software
5668          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5669 *
5670 * [1] - Intel spec. 6.13 "Error Code"
5671 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5672 * [3] - Intel Instruction reference for INT n.
5673 */
5674 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5675 && (fFlags & IEM_XCPT_FLAGS_ERR)
5676 && u8Vector != X86_XCPT_PF
5677 && u8Vector != X86_XCPT_DF)
5678 {
5679 uErr |= X86_TRAP_ERR_EXTERNAL;
5680 }
5681 }
5682
5683 pVCpu->iem.s.cXcptRecursions++;
5684 pVCpu->iem.s.uCurXcpt = u8Vector;
5685 pVCpu->iem.s.fCurXcpt = fFlags;
5686 pVCpu->iem.s.uCurXcptErr = uErr;
5687 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5688
5689 /*
5690 * Extensive logging.
5691 */
5692#if defined(LOG_ENABLED) && defined(IN_RING3)
5693 if (LogIs3Enabled())
5694 {
5695 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5696 PVM pVM = pVCpu->CTX_SUFF(pVM);
5697 char szRegs[4096];
5698 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5699 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5700 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5701 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5702 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5703 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5704 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5705 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5706 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5707 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5708 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5709 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5710 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5711 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5712 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5713 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5714 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5715 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5716 " efer=%016VR{efer}\n"
5717 " pat=%016VR{pat}\n"
5718 " sf_mask=%016VR{sf_mask}\n"
5719 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5720 " lstar=%016VR{lstar}\n"
5721 " star=%016VR{star} cstar=%016VR{cstar}\n"
5722 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5723 );
5724
5725 char szInstr[256];
5726 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5727 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5728 szInstr, sizeof(szInstr), NULL);
5729 Log3(("%s%s\n", szRegs, szInstr));
5730 }
5731#endif /* LOG_ENABLED */
5732
5733 /*
5734 * Call the mode specific worker function.
5735 */
5736 VBOXSTRICTRC rcStrict;
5737 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5738 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5739 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5740 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5741 else
5742 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5743
5744 /* Flush the prefetch buffer. */
5745#ifdef IEM_WITH_CODE_TLB
5746 pVCpu->iem.s.pbInstrBuf = NULL;
5747#else
5748 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5749#endif
5750
5751 /*
5752 * Unwind.
5753 */
5754 pVCpu->iem.s.cXcptRecursions--;
5755 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5756 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5757 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5758 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5759 pVCpu->iem.s.cXcptRecursions + 1));
5760 return rcStrict;
5761}
5762
5763#ifdef IEM_WITH_SETJMP
5764/**
5765 * See iemRaiseXcptOrInt. Will not return.
5766 */
5767IEM_STATIC DECL_NO_RETURN(void)
5768iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5769 uint8_t cbInstr,
5770 uint8_t u8Vector,
5771 uint32_t fFlags,
5772 uint16_t uErr,
5773 uint64_t uCr2)
5774{
5775 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5776 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5777}
5778#endif
5779
5780
5781/** \#DE - 00. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5785}
5786
5787
5788/** \#DB - 01.
5789  * @note This automatically clears DR7.GD. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5791{
5792 /** @todo set/clear RF. */
5793 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5795}
5796
5797
5798/** \#BR - 05. */
5799DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5800{
5801 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5802}
5803
5804
5805/** \#UD - 06. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5807{
5808 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5809}
5810
5811
5812/** \#NM - 07. */
5813DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5814{
5815 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5816}
5817
5818
5819/** \#TS(err) - 0a. */
5820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5821{
5822 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5823}
5824
5825
5826/** \#TS(tr) - 0a. */
5827DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5828{
5829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5830 pVCpu->cpum.GstCtx.tr.Sel, 0);
5831}
5832
5833
5834/** \#TS(0) - 0a. */
5835DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5836{
5837 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5838 0, 0);
5839}
5840
5841
5842/** \#TS(err) - 0a. */
5843DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5844{
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5846 uSel & X86_SEL_MASK_OFF_RPL, 0);
5847}
5848
5849
5850/** \#NP(err) - 0b. */
5851DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5852{
5853 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5854}
5855
5856
5857/** \#NP(sel) - 0b. */
5858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5859{
5860 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5861 uSel & ~X86_SEL_RPL, 0);
5862}
5863
5864
5865/** \#SS(seg) - 0c. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5867{
5868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5869 uSel & ~X86_SEL_RPL, 0);
5870}
5871
5872
5873/** \#SS(err) - 0c. */
5874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5875{
5876 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5877}
5878
5879
5880/** \#GP(n) - 0d. */
5881DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5882{
5883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5884}
5885
5886
5887/** \#GP(0) - 0d. */
5888DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5889{
5890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5891}
5892
5893#ifdef IEM_WITH_SETJMP
5894/** \#GP(0) - 0d. */
5895DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5896{
5897 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5898}
5899#endif
5900
5901
5902/** \#GP(sel) - 0d. */
5903DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5904{
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5906 Sel & ~X86_SEL_RPL, 0);
5907}
5908
5909
5910/** \#GP(0) - 0d. */
5911DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5912{
5913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5914}
5915
5916
5917/** \#GP(sel) - 0d. */
5918DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5919{
5920 NOREF(iSegReg); NOREF(fAccess);
5921 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5922 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5923}
5924
5925#ifdef IEM_WITH_SETJMP
5926/** \#GP(sel) - 0d, longjmp. */
5927DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5928{
5929 NOREF(iSegReg); NOREF(fAccess);
5930 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5931 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5932}
5933#endif
5934
5935/** \#GP(sel) - 0d. */
5936DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5937{
5938 NOREF(Sel);
5939 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5940}
5941
5942#ifdef IEM_WITH_SETJMP
5943/** \#GP(sel) - 0d, longjmp. */
5944DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5945{
5946 NOREF(Sel);
5947 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5948}
5949#endif
5950
5951
5952/** \#GP(sel) - 0d. */
5953DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5954{
5955 NOREF(iSegReg); NOREF(fAccess);
5956 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5957}
5958
5959#ifdef IEM_WITH_SETJMP
5960/** \#GP(sel) - 0d, longjmp. */
5961DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5962 uint32_t fAccess)
5963{
5964 NOREF(iSegReg); NOREF(fAccess);
5965 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5966}
5967#endif
5968
5969
5970/** \#PF(n) - 0e. */
5971DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5972{
5973 uint16_t uErr;
5974 switch (rc)
5975 {
5976 case VERR_PAGE_NOT_PRESENT:
5977 case VERR_PAGE_TABLE_NOT_PRESENT:
5978 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5979 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5980 uErr = 0;
5981 break;
5982
5983 default:
5984 AssertMsgFailed(("%Rrc\n", rc));
5985 RT_FALL_THRU();
5986 case VERR_ACCESS_DENIED:
5987 uErr = X86_TRAP_PF_P;
5988 break;
5989
5990 /** @todo reserved */
5991 }
5992
5993 if (pVCpu->iem.s.uCpl == 3)
5994 uErr |= X86_TRAP_PF_US;
5995
5996 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5997 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5998 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5999 uErr |= X86_TRAP_PF_ID;
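    /* Note: the check above only reports the I/D bit when NX paging can be in effect
       (CR4.PAE together with EFER.NXE); this mirrors what the code tests, even though
       the architectural conditions may be wider (e.g. SMEP). */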
6000
6001#if 0 /* This is so much non-sense, really. Why was it done like that? */
6002 /* Note! RW access callers reporting a WRITE protection fault, will clear
6003 the READ flag before calling. So, read-modify-write accesses (RW)
6004 can safely be reported as READ faults. */
6005 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6006 uErr |= X86_TRAP_PF_RW;
6007#else
6008 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6009 {
6010 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6011 uErr |= X86_TRAP_PF_RW;
6012 }
6013#endif
6014
6015 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6016 uErr, GCPtrWhere);
6017}
6018
6019#ifdef IEM_WITH_SETJMP
6020/** \#PF(n) - 0e, longjmp. */
6021IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6022{
6023 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6024}
6025#endif
6026
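/*
 * Illustrative sketch (not compiled) of how the \#PF error code assembled by
 * iemRaisePageFault() above comes together.  The scenario values are
 * assumptions for illustration only; the X86_TRAP_PF_* flags are the ones
 * already used above.
 */
#if 0
static uint16_t iemSketchPageFaultErrCode(void)
{
    /* CPL=3 write to a present, read-only page: protection fault. */
    uint16_t uErr = X86_TRAP_PF_P;          /* translation present -> protection violation */
    uErr |= X86_TRAP_PF_US;                 /* the access was made at CPL 3 */
    uErr |= X86_TRAP_PF_RW;                 /* pure write access (no read half) */
    /* An instruction fetch from an NX page (CR4.PAE + EFER.NXE) would add X86_TRAP_PF_ID. */
    return uErr;                            /* 0x0007 for the write case above */
}
#endif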
6027
6028/** \#MF(0) - 10. */
6029DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6030{
6031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6032}
6033
6034
6035/** \#AC(0) - 11. */
6036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6037{
6038 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6039}
6040
6041
6042/**
6043 * Macro for calling iemCImplRaiseDivideError().
6044 *
6045 * This enables us to add/remove arguments and force different levels of
6046 * inlining as we wish.
6047 *
6048 * @return Strict VBox status code.
6049 */
6050#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6051IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6052{
6053 NOREF(cbInstr);
6054 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6055}
6056
6057
6058/**
6059 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6060 *
6061 * This enables us to add/remove arguments and force different levels of
6062 * inlining as we wish.
6063 *
6064 * @return Strict VBox status code.
6065 */
6066#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6067IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6068{
6069 NOREF(cbInstr);
6070 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6071}
6072
6073
6074/**
6075 * Macro for calling iemCImplRaiseInvalidOpcode().
6076 *
6077 * This enables us to add/remove arguments and force different levels of
6078 * inlining as we wish.
6079 *
6080 * @return Strict VBox status code.
6081 */
6082#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6083IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6084{
6085 NOREF(cbInstr);
6086 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6087}
6088
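/*
 * Minimal usage sketch (not compiled): an opcode decoder simply returns one of
 * the IEMOP_RAISE_* macros above to defer the exception raising to the C
 * implementation.  The decoder name iemOp_SketchInvalid is made up for
 * illustration; the FNIEMOP_UD_STUB macro further down uses the same pattern.
 */
#if 0
FNIEMOP_DEF(iemOp_SketchInvalid)
{
    /* Decoded an encoding that must raise \#UD on this CPU. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
#endif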
6089
6090/** @} */
6091
6092
6093/*
6094 *
6095 * Helper routines.
6096 * Helper routines.
6097 * Helper routines.
6098 *
6099 */
6100
6101/**
6102 * Recalculates the effective operand size.
6103 *
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 */
6106IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6107{
6108 switch (pVCpu->iem.s.enmCpuMode)
6109 {
6110 case IEMMODE_16BIT:
6111 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6112 break;
6113 case IEMMODE_32BIT:
6114 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6115 break;
6116 case IEMMODE_64BIT:
6117 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6118 {
6119 case 0:
6120 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6121 break;
6122 case IEM_OP_PRF_SIZE_OP:
6123 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6124 break;
6125 case IEM_OP_PRF_SIZE_REX_W:
6126 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6127 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6128 break;
6129 }
6130 break;
6131 default:
6132 AssertFailed();
6133 }
6134}
6135
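/*
 * Sketch (not compiled) of the 64-bit mode prefix handling implemented by
 * iemRecalEffOpSize() above; the prefix combinations are assumptions for
 * illustration.
 */
#if 0
static void iemSketchEffOpSize64(PVMCPU pVCpu)
{
    Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);

    pVCpu->iem.s.fPrefixes = 0;
    iemRecalEffOpSize(pVCpu);               /* -> enmEffOpSize == enmDefOpSize (usually 32-bit) */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pVCpu);               /* 0x66 alone -> IEMMODE_16BIT */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pVCpu);               /* REX.W wins over 0x66 -> IEMMODE_64BIT */
}
#endif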
6136
6137/**
6138 * Sets the default operand size to 64-bit and recalculates the effective
6139 * operand size.
6140 *
6141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6142 */
6143IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6144{
6145 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6146 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6147 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6148 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6149 else
6150 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6151}
6152
6153
6154/*
6155 *
6156 * Common opcode decoders.
6157 * Common opcode decoders.
6158 * Common opcode decoders.
6159 *
6160 */
6161//#include <iprt/mem.h>
6162
6163/**
6164 * Used to add extra details about a stub case.
6165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6166 */
6167IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6168{
6169#if defined(LOG_ENABLED) && defined(IN_RING3)
6170 PVM pVM = pVCpu->CTX_SUFF(pVM);
6171 char szRegs[4096];
6172 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6173 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6174 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6175 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6176 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6177 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6178 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6179 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6180 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6181 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6182 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6183 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6184 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6185 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6186 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6187 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6188 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6189 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6190 " efer=%016VR{efer}\n"
6191 " pat=%016VR{pat}\n"
6192 " sf_mask=%016VR{sf_mask}\n"
6193 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6194 " lstar=%016VR{lstar}\n"
6195 " star=%016VR{star} cstar=%016VR{cstar}\n"
6196 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6197 );
6198
6199 char szInstr[256];
6200 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6201 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6202 szInstr, sizeof(szInstr), NULL);
6203
6204 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6205#else
6206    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6207#endif
6208}
6209
6210/**
6211 * Complains about a stub.
6212 *
6213 * Providing two versions of this macro, one for daily use and one for use when
6214 * working on IEM.
6215 */
6216#if 0
6217# define IEMOP_BITCH_ABOUT_STUB() \
6218 do { \
6219 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6220 iemOpStubMsg2(pVCpu); \
6221 RTAssertPanic(); \
6222 } while (0)
6223#else
6224# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6225#endif
6226
6227/** Stubs an opcode. */
6228#define FNIEMOP_STUB(a_Name) \
6229 FNIEMOP_DEF(a_Name) \
6230 { \
6231 RT_NOREF_PV(pVCpu); \
6232 IEMOP_BITCH_ABOUT_STUB(); \
6233 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6234 } \
6235 typedef int ignore_semicolon
6236
6237/** Stubs an opcode. */
6238#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6239 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6240 { \
6241 RT_NOREF_PV(pVCpu); \
6242 RT_NOREF_PV(a_Name0); \
6243 IEMOP_BITCH_ABOUT_STUB(); \
6244 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6245 } \
6246 typedef int ignore_semicolon
6247
6248/** Stubs an opcode which currently should raise \#UD. */
6249#define FNIEMOP_UD_STUB(a_Name) \
6250 FNIEMOP_DEF(a_Name) \
6251 { \
6252 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6253 return IEMOP_RAISE_INVALID_OPCODE(); \
6254 } \
6255 typedef int ignore_semicolon
6256
6257/** Stubs an opcode which currently should raise \#UD. */
6258#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6259 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6260 { \
6261 RT_NOREF_PV(pVCpu); \
6262 RT_NOREF_PV(a_Name0); \
6263 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6264 return IEMOP_RAISE_INVALID_OPCODE(); \
6265 } \
6266 typedef int ignore_semicolon
6267
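/*
 * Usage sketch (not compiled): stubbing opcodes with the macros above.  The
 * opcode names are hypothetical; real instruction tables reference the
 * generated functions directly.
 */
#if 0
FNIEMOP_STUB(iemOp_SketchNotYetImplemented);    /* logs and returns VERR_IEM_INSTR_NOT_IMPLEMENTED */
FNIEMOP_UD_STUB(iemOp_SketchReservedEncoding);  /* raises \#UD via IEMOP_RAISE_INVALID_OPCODE */
#endif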
6268
6269
6270/** @name Register Access.
6271 * @{
6272 */
6273
6274/**
6275 * Gets a reference (pointer) to the specified hidden segment register.
6276 *
6277 * @returns Hidden register reference.
6278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6279 * @param iSegReg The segment register.
6280 */
6281IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6282{
6283 Assert(iSegReg < X86_SREG_COUNT);
6284 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6285 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6286
6287#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6288 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6289 { /* likely */ }
6290 else
6291 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6292#else
6293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6294#endif
6295 return pSReg;
6296}
6297
6298
6299/**
6300 * Ensures that the given hidden segment register is up to date.
6301 *
6302 * @returns Hidden register reference.
6303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6304 * @param pSReg The segment register.
6305 */
6306IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6307{
6308#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6309 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6310 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6311#else
6312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6313 NOREF(pVCpu);
6314#endif
6315 return pSReg;
6316}
6317
6318
6319/**
6320 * Gets a reference (pointer) to the specified segment register (the selector
6321 * value).
6322 *
6323 * @returns Pointer to the selector variable.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iSegReg The segment register.
6326 */
6327DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6328{
6329 Assert(iSegReg < X86_SREG_COUNT);
6330 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6331 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6332}
6333
6334
6335/**
6336 * Fetches the selector value of a segment register.
6337 *
6338 * @returns The selector value.
6339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6340 * @param iSegReg The segment register.
6341 */
6342DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6343{
6344 Assert(iSegReg < X86_SREG_COUNT);
6345 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6346 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6347}
6348
6349
6350/**
6351 * Fetches the base address value of a segment register.
6352 *
6353 * @returns The segment base address.
6354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6355 * @param iSegReg The segment register.
6356 */
6357DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6358{
6359 Assert(iSegReg < X86_SREG_COUNT);
6360 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6361 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6362}
6363
6364
6365/**
6366 * Gets a reference (pointer) to the specified general purpose register.
6367 *
6368 * @returns Register reference.
6369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6370 * @param iReg The general purpose register.
6371 */
6372DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6373{
6374 Assert(iReg < 16);
6375 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6376}
6377
6378
6379/**
6380 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6381 *
6382 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6383 *
6384 * @returns Register reference.
6385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6386 * @param iReg The register.
6387 */
6388DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6389{
6390 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6391 {
6392 Assert(iReg < 16);
6393 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6394 }
6395 /* high 8-bit register. */
6396 Assert(iReg < 8);
6397 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6398}
6399
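/*
 * Sketch (not compiled) of the legacy high-byte quirk handled by
 * iemGRegRefU8() above: without any REX prefix, encodings 4 thru 7 select
 * AH/CH/DH/BH (bits 15:8 of the first four GPRs) instead of SPL/BPL/SIL/DIL.
 * The prefix values are assumptions for illustration.
 */
#if 0
static void iemSketchGRegRefU8(PVMCPU pVCpu)
{
    pVCpu->iem.s.fPrefixes = 0;                     /* no REX prefix */
    uint8_t *pbAh  = iemGRegRefU8(pVCpu, 4);        /* == &pVCpu->cpum.GstCtx.aGRegs[0].bHi, i.e. AH  */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_REX;        /* any REX prefix present */
    uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);        /* == &pVCpu->cpum.GstCtx.aGRegs[4].u8,  i.e. SPL */
    NOREF(pbAh); NOREF(pbSpl);
}
#endif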
6400
6401/**
6402 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6403 *
6404 * @returns Register reference.
6405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6406 * @param iReg The register.
6407 */
6408DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6409{
6410 Assert(iReg < 16);
6411 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6412}
6413
6414
6415/**
6416 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6417 *
6418 * @returns Register reference.
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 * @param iReg The register.
6421 */
6422DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6423{
6424 Assert(iReg < 16);
6425 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6426}
6427
6428
6429/**
6430 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6431 *
6432 * @returns Register reference.
6433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6434 * @param iReg The register.
6435 */
6436DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6437{
6438    Assert(iReg < 16);
6439 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6440}
6441
6442
6443/**
6444 * Gets a reference (pointer) to the specified segment register's base address.
6445 *
6446 * @returns Segment register base address reference.
6447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6448 * @param iSegReg The segment selector.
6449 */
6450DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6451{
6452 Assert(iSegReg < X86_SREG_COUNT);
6453 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6454 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6455}
6456
6457
6458/**
6459 * Fetches the value of an 8-bit general purpose register.
6460 *
6461 * @returns The register value.
6462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6463 * @param iReg The register.
6464 */
6465DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6466{
6467 return *iemGRegRefU8(pVCpu, iReg);
6468}
6469
6470
6471/**
6472 * Fetches the value of a 16-bit general purpose register.
6473 *
6474 * @returns The register value.
6475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6476 * @param iReg The register.
6477 */
6478DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6479{
6480 Assert(iReg < 16);
6481 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6482}
6483
6484
6485/**
6486 * Fetches the value of a 32-bit general purpose register.
6487 *
6488 * @returns The register value.
6489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6490 * @param iReg The register.
6491 */
6492DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6493{
6494 Assert(iReg < 16);
6495 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6496}
6497
6498
6499/**
6500 * Fetches the value of a 64-bit general purpose register.
6501 *
6502 * @returns The register value.
6503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6504 * @param iReg The register.
6505 */
6506DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6507{
6508 Assert(iReg < 16);
6509 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6510}
6511
6512
6513/**
6514 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6515 *
6516 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6517 * segment limit.
6518 *
6519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6520 * @param offNextInstr The offset of the next instruction.
6521 */
6522IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6523{
6524 switch (pVCpu->iem.s.enmEffOpSize)
6525 {
6526 case IEMMODE_16BIT:
6527 {
6528 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6529 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6530 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6531 return iemRaiseGeneralProtectionFault0(pVCpu);
6532 pVCpu->cpum.GstCtx.rip = uNewIp;
6533 break;
6534 }
6535
6536 case IEMMODE_32BIT:
6537 {
6538 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6539 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6540
6541 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6542 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6543 return iemRaiseGeneralProtectionFault0(pVCpu);
6544 pVCpu->cpum.GstCtx.rip = uNewEip;
6545 break;
6546 }
6547
6548 case IEMMODE_64BIT:
6549 {
6550 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6551
6552 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6553 if (!IEM_IS_CANONICAL(uNewRip))
6554 return iemRaiseGeneralProtectionFault0(pVCpu);
6555 pVCpu->cpum.GstCtx.rip = uNewRip;
6556 break;
6557 }
6558
6559 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6560 }
6561
6562 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6563
6564#ifndef IEM_WITH_CODE_TLB
6565 /* Flush the prefetch buffer. */
6566 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6567#endif
6568
6569 return VINF_SUCCESS;
6570}
6571
6572
6573/**
6574 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6575 *
6576 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6577 * segment limit.
6578 *
6579 * @returns Strict VBox status code.
6580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6581 * @param offNextInstr The offset of the next instruction.
6582 */
6583IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6584{
6585 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6586
6587 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6588 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6589 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6590 return iemRaiseGeneralProtectionFault0(pVCpu);
6591 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6592 pVCpu->cpum.GstCtx.rip = uNewIp;
6593 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6594
6595#ifndef IEM_WITH_CODE_TLB
6596 /* Flush the prefetch buffer. */
6597 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6598#endif
6599
6600 return VINF_SUCCESS;
6601}
6602
6603
6604/**
6605 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6606 *
6607 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6608 * segment limit.
6609 *
6610 * @returns Strict VBox status code.
6611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6612 * @param offNextInstr The offset of the next instruction.
6613 */
6614IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6615{
6616 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6617
6618 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6619 {
6620 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6621
6622 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6623 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6624 return iemRaiseGeneralProtectionFault0(pVCpu);
6625 pVCpu->cpum.GstCtx.rip = uNewEip;
6626 }
6627 else
6628 {
6629 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6630
6631 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6632 if (!IEM_IS_CANONICAL(uNewRip))
6633 return iemRaiseGeneralProtectionFault0(pVCpu);
6634 pVCpu->cpum.GstCtx.rip = uNewRip;
6635 }
6636 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6637
6638#ifndef IEM_WITH_CODE_TLB
6639 /* Flush the prefetch buffer. */
6640 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6641#endif
6642
6643 return VINF_SUCCESS;
6644}
6645
6646
6647/**
6648 * Performs a near jump to the specified address.
6649 *
6650 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6651 * segment limit.
6652 *
6653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6654 * @param uNewRip The new RIP value.
6655 */
6656IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6657{
6658 switch (pVCpu->iem.s.enmEffOpSize)
6659 {
6660 case IEMMODE_16BIT:
6661 {
6662 Assert(uNewRip <= UINT16_MAX);
6663 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6664 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6665 return iemRaiseGeneralProtectionFault0(pVCpu);
6666 /** @todo Test 16-bit jump in 64-bit mode. */
6667 pVCpu->cpum.GstCtx.rip = uNewRip;
6668 break;
6669 }
6670
6671 case IEMMODE_32BIT:
6672 {
6673 Assert(uNewRip <= UINT32_MAX);
6674 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6675 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6676
6677 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6678 return iemRaiseGeneralProtectionFault0(pVCpu);
6679 pVCpu->cpum.GstCtx.rip = uNewRip;
6680 break;
6681 }
6682
6683 case IEMMODE_64BIT:
6684 {
6685 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6686
6687 if (!IEM_IS_CANONICAL(uNewRip))
6688 return iemRaiseGeneralProtectionFault0(pVCpu);
6689 pVCpu->cpum.GstCtx.rip = uNewRip;
6690 break;
6691 }
6692
6693 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6694 }
6695
6696 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6697
6698#ifndef IEM_WITH_CODE_TLB
6699 /* Flush the prefetch buffer. */
6700 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6701#endif
6702
6703 return VINF_SUCCESS;
6704}
6705
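/*
 * Sketch (not compiled): in 64-bit mode iemRegRipJump() above only performs
 * the canonical address check, there is no CS limit check.  The target
 * addresses are assumptions for illustration.
 */
#if 0
static void iemSketchRipJump64(PVMCPU pVCpu)
{
    Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    VBOXSTRICTRC rcStrict;
    rcStrict = iemRegRipJump(pVCpu, UINT64_C(0x00007fffffffffff)); /* canonical     -> RIP updated, VINF_SUCCESS */
    rcStrict = iemRegRipJump(pVCpu, UINT64_C(0x0000800000000000)); /* non-canonical -> \#GP(0) raised            */
    NOREF(rcStrict);
}
#endif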
6706
6707/**
6708 * Get the address of the top of the stack.
6709 *
6710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6711 */
6712DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6713{
6714 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6715 return pVCpu->cpum.GstCtx.rsp;
6716 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6717 return pVCpu->cpum.GstCtx.esp;
6718 return pVCpu->cpum.GstCtx.sp;
6719}
6720
6721
6722/**
6723 * Updates the RIP/EIP/IP to point to the next instruction.
6724 *
6725 * This function leaves the EFLAGS.RF flag alone.
6726 *
6727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6728 * @param cbInstr The number of bytes to add.
6729 */
6730IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6731{
6732 switch (pVCpu->iem.s.enmCpuMode)
6733 {
6734 case IEMMODE_16BIT:
6735 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6736 pVCpu->cpum.GstCtx.eip += cbInstr;
6737 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6738 break;
6739
6740 case IEMMODE_32BIT:
6741 pVCpu->cpum.GstCtx.eip += cbInstr;
6742 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6743 break;
6744
6745 case IEMMODE_64BIT:
6746 pVCpu->cpum.GstCtx.rip += cbInstr;
6747 break;
6748 default: AssertFailed();
6749 }
6750}
6751
6752
6753#if 0
6754/**
6755 * Updates the RIP/EIP/IP to point to the next instruction.
6756 *
6757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6758 */
6759IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6760{
6761 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6762}
6763#endif
6764
6765
6766
6767/**
6768 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6769 *
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param cbInstr The number of bytes to add.
6772 */
6773IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6774{
6775 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6776
6777 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6778#if ARCH_BITS >= 64
6779 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6780 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6781 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6782#else
6783 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6784 pVCpu->cpum.GstCtx.rip += cbInstr;
6785 else
6786 pVCpu->cpum.GstCtx.eip += cbInstr;
6787#endif
6788}
6789
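/*
 * Sketch (not compiled) of the mask table logic in iemRegAddToRipAndClearRF()
 * above: 16-bit and 32-bit code advances within the low 32 bits (EIP), while
 * 64-bit code uses the full RIP; CS limit/wrap checks happen when the next
 * instruction is fetched.
 */
#if 0
static uint64_t iemSketchAdvanceRip(uint64_t uRip, uint8_t cbInstr, IEMMODE enmCpuMode)
{
    uint64_t const fMask = enmCpuMode == IEMMODE_64BIT ? UINT64_MAX : UINT64_C(0xffffffff);
    return (uRip + cbInstr) & fMask;
}
#endif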
6790
6791/**
6792 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6793 *
6794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6795 */
6796IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6797{
6798 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6799}
6800
6801
6802/**
6803 * Adds to the stack pointer.
6804 *
6805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6806 * @param cbToAdd The number of bytes to add (8-bit!).
6807 */
6808DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6809{
6810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6811 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6812 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6813 pVCpu->cpum.GstCtx.esp += cbToAdd;
6814 else
6815 pVCpu->cpum.GstCtx.sp += cbToAdd;
6816}
6817
6818
6819/**
6820 * Subtracts from the stack pointer.
6821 *
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 * @param cbToSub The number of bytes to subtract (8-bit!).
6824 */
6825DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6826{
6827 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6828 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6829 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6830 pVCpu->cpum.GstCtx.esp -= cbToSub;
6831 else
6832 pVCpu->cpum.GstCtx.sp -= cbToSub;
6833}
6834
6835
6836/**
6837 * Adds to the temporary stack pointer.
6838 *
6839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6840 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6841 * @param cbToAdd The number of bytes to add (16-bit).
6842 */
6843DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6844{
6845 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6846 pTmpRsp->u += cbToAdd;
6847 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6848 pTmpRsp->DWords.dw0 += cbToAdd;
6849 else
6850 pTmpRsp->Words.w0 += cbToAdd;
6851}
6852
6853
6854/**
6855 * Subtracts from the temporary stack pointer.
6856 *
6857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6858 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6859 * @param cbToSub The number of bytes to subtract.
6860 * @remarks The @a cbToSub argument *MUST* be 16-bit, as iemCImpl_enter
6861 * expects that.
6862 */
6863DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6864{
6865 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6866 pTmpRsp->u -= cbToSub;
6867 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6868 pTmpRsp->DWords.dw0 -= cbToSub;
6869 else
6870 pTmpRsp->Words.w0 -= cbToSub;
6871}
6872
6873
6874/**
6875 * Calculates the effective stack address for a push of the specified size as
6876 * well as the new RSP value (upper bits may be masked).
6877 *
6878 * @returns Effective stack address for the push.
6879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6880 * @param cbItem The size of the stack item to push.
6881 * @param puNewRsp Where to return the new RSP value.
6882 */
6883DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6884{
6885 RTUINT64U uTmpRsp;
6886 RTGCPTR GCPtrTop;
6887 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6888
6889 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6890 GCPtrTop = uTmpRsp.u -= cbItem;
6891 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6892 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6893 else
6894 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6895 *puNewRsp = uTmpRsp.u;
6896 return GCPtrTop;
6897}
6898
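/*
 * Sketch (not compiled): with a 32-bit stack segment (SS.D=1), a 4-byte push
 * computed by iemRegGetRspForPush() above only touches the low 32 bits of
 * RSP.  The starting RSP value is an assumption for illustration.
 */
#if 0
static void iemSketchRspForPush(PVMCPU pVCpu)
{
    uint64_t uNewRsp;
    pVCpu->cpum.GstCtx.rsp = UINT64_C(0x0000000100001000);
    RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
    /* SS.D=1: GCPtrTop == 0x00000ffc, uNewRsp == 0x0000000100000ffc (high dword untouched). */
    NOREF(GCPtrTop);
}
#endif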
6899
6900/**
6901 * Gets the current stack pointer and calculates the value after a pop of the
6902 * specified size.
6903 *
6904 * @returns Current stack pointer.
6905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6906 * @param cbItem The size of the stack item to pop.
6907 * @param puNewRsp Where to return the new RSP value.
6908 */
6909DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6910{
6911 RTUINT64U uTmpRsp;
6912 RTGCPTR GCPtrTop;
6913 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6914
6915 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6916 {
6917 GCPtrTop = uTmpRsp.u;
6918 uTmpRsp.u += cbItem;
6919 }
6920 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6921 {
6922 GCPtrTop = uTmpRsp.DWords.dw0;
6923 uTmpRsp.DWords.dw0 += cbItem;
6924 }
6925 else
6926 {
6927 GCPtrTop = uTmpRsp.Words.w0;
6928 uTmpRsp.Words.w0 += cbItem;
6929 }
6930 *puNewRsp = uTmpRsp.u;
6931 return GCPtrTop;
6932}
6933
6934
6935/**
6936 * Calculates the effective stack address for a push of the specified size as
6937 * well as the new temporary RSP value (upper bits may be masked).
6938 *
6939 * @returns Effective stack address for the push.
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 * @param pTmpRsp The temporary stack pointer. This is updated.
6942 * @param cbItem The size of the stack item to push.
6943 */
6944DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6945{
6946 RTGCPTR GCPtrTop;
6947
6948 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6949 GCPtrTop = pTmpRsp->u -= cbItem;
6950 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6951 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6952 else
6953 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6954 return GCPtrTop;
6955}
6956
6957
6958/**
6959 * Gets the effective stack address for a pop of the specified size and
6960 * calculates and updates the temporary RSP.
6961 *
6962 * @returns Current stack pointer.
6963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6964 * @param pTmpRsp The temporary stack pointer. This is updated.
6965 * @param cbItem The size of the stack item to pop.
6966 */
6967DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6968{
6969 RTGCPTR GCPtrTop;
6970 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6971 {
6972 GCPtrTop = pTmpRsp->u;
6973 pTmpRsp->u += cbItem;
6974 }
6975 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6976 {
6977 GCPtrTop = pTmpRsp->DWords.dw0;
6978 pTmpRsp->DWords.dw0 += cbItem;
6979 }
6980 else
6981 {
6982 GCPtrTop = pTmpRsp->Words.w0;
6983 pTmpRsp->Words.w0 += cbItem;
6984 }
6985 return GCPtrTop;
6986}
6987
6988/** @} */
6989
6990
6991/** @name FPU access and helpers.
6992 *
6993 * @{
6994 */
6995
6996
6997/**
6998 * Hook for preparing to use the host FPU.
6999 *
7000 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7001 *
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 */
7004DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7005{
7006#ifdef IN_RING3
7007 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7008#else
7009 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7010#endif
7011 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7012}
7013
7014
7015/**
7016 * Hook for preparing to use the host FPU for SSE.
7017 *
7018 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7019 *
7020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7021 */
7022DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7023{
7024 iemFpuPrepareUsage(pVCpu);
7025}
7026
7027
7028/**
7029 * Hook for preparing to use the host FPU for AVX.
7030 *
7031 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7032 *
7033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7034 */
7035DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7036{
7037 iemFpuPrepareUsage(pVCpu);
7038}
7039
7040
7041/**
7042 * Hook for actualizing the guest FPU state before the interpreter reads it.
7043 *
7044 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 */
7048DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7049{
7050#ifdef IN_RING3
7051 NOREF(pVCpu);
7052#else
7053 CPUMRZFpuStateActualizeForRead(pVCpu);
7054#endif
7055 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7056}
7057
7058
7059/**
7060 * Hook for actualizing the guest FPU state before the interpreter changes it.
7061 *
7062 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7063 *
7064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7065 */
7066DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7067{
7068#ifdef IN_RING3
7069 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7070#else
7071 CPUMRZFpuStateActualizeForChange(pVCpu);
7072#endif
7073 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7074}
7075
7076
7077/**
7078 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7079 * only.
7080 *
7081 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7082 *
7083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7084 */
7085DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7086{
7087#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7088 NOREF(pVCpu);
7089#else
7090 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7091#endif
7092 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7093}
7094
7095
7096/**
7097 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7098 * read+write.
7099 *
7100 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7101 *
7102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7103 */
7104DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7105{
7106#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7107 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7108#else
7109 CPUMRZFpuStateActualizeForChange(pVCpu);
7110#endif
7111 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7112}
7113
7114
7115/**
7116 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7117 * only.
7118 *
7119 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7120 *
7121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7122 */
7123DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7124{
7125#ifdef IN_RING3
7126 NOREF(pVCpu);
7127#else
7128 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7129#endif
7130 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7131}
7132
7133
7134/**
7135 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7136 * read+write.
7137 *
7138 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7139 *
7140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7141 */
7142DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7143{
7144#ifdef IN_RING3
7145 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7146#else
7147 CPUMRZFpuStateActualizeForChange(pVCpu);
7148#endif
7149 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7150}
7151
7152
7153/**
7154 * Stores a QNaN value into a FPU register.
7155 *
7156 * @param pReg Pointer to the register.
7157 */
7158DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7159{
7160 pReg->au32[0] = UINT32_C(0x00000000);
7161 pReg->au32[1] = UINT32_C(0xc0000000);
7162 pReg->au16[4] = UINT16_C(0xffff);
7163}
7164
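/*
 * Sketch (not compiled): the three stores in iemFpuStoreQNan() above yield
 * the x87 "real indefinite" QNaN (sign=1, exponent=0x7fff, mantissa
 * 0xc000000000000000), which is what hardware produces on masked invalid
 * operations.
 */
#if 0
static void iemSketchCheckQNan(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au16[4] == UINT16_C(0xffff));
    Assert(r80.au32[1] == UINT32_C(0xc0000000) && r80.au32[0] == 0);
}
#endif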
7165
7166/**
7167 * Updates the FOP, FPU.CS and FPUIP registers.
7168 *
7169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7170 * @param pFpuCtx The FPU context.
7171 */
7172DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7173{
7174 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7175 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7176    /** @todo x87.CS and FPUIP need to be kept separately. */
7177 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7178 {
7179 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7180 * happens in real mode here based on the fnsave and fnstenv images. */
7181 pFpuCtx->CS = 0;
7182 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7183 }
7184 else
7185 {
7186 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7187 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7188 }
7189}
7190
7191
7192/**
7193 * Updates the x87.DS and FPUDP registers.
7194 *
7195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7196 * @param pFpuCtx The FPU context.
7197 * @param iEffSeg The effective segment register.
7198 * @param GCPtrEff The effective address relative to @a iEffSeg.
7199 */
7200DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7201{
7202 RTSEL sel;
7203 switch (iEffSeg)
7204 {
7205 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7206 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7207 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7208 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7209 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7210 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7211 default:
7212 AssertMsgFailed(("%d\n", iEffSeg));
7213 sel = pVCpu->cpum.GstCtx.ds.Sel;
7214 }
7215    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7216 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7217 {
7218 pFpuCtx->DS = 0;
7219 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7220 }
7221 else
7222 {
7223 pFpuCtx->DS = sel;
7224 pFpuCtx->FPUDP = GCPtrEff;
7225 }
7226}
7227
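/*
 * Sketch (not compiled): in real/V86 mode iemFpuUpdateDP() above folds the
 * segment into the offset (seg << 4, like FNSAVE/FNSTENV images on real
 * hardware) and clears DS.  Example values are assumptions.
 */
#if 0
static uint32_t iemSketchRealModeFpuDp(uint16_t sel, uint16_t off)
{
    return (uint32_t)off + ((uint32_t)sel << 4);    /* e.g. 0x2000:0x0123 -> FPUDP = 0x20123, DS = 0 */
}
#endif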
7228
7229/**
7230 * Rotates the stack registers in the push direction.
7231 *
7232 * @param pFpuCtx The FPU context.
7233 * @remarks This is a complete waste of time, but fxsave stores the registers in
7234 * stack order.
7235 */
7236DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7237{
7238 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7239 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7240 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7241 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7242 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7243 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7244 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7245 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7246 pFpuCtx->aRegs[0].r80 = r80Tmp;
7247}
7248
7249
7250/**
7251 * Rotates the stack registers in the pop direction.
7252 *
7253 * @param pFpuCtx The FPU context.
7254 * @remarks This is a complete waste of time, but fxsave stores the registers in
7255 * stack order.
7256 */
7257DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7258{
7259 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7260 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7261 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7262 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7263 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7264 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7265 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7266 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7267 pFpuCtx->aRegs[7].r80 = r80Tmp;
7268}
7269
7270
7271/**
7272 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7273 * exception prevents it.
7274 *
7275 * @param pResult The FPU operation result to push.
7276 * @param pFpuCtx The FPU context.
7277 */
7278IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7279{
7280 /* Update FSW and bail if there are pending exceptions afterwards. */
7281 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7282 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7283 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7284 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7285 {
7286 pFpuCtx->FSW = fFsw;
7287 return;
7288 }
7289
7290 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7291 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7292 {
7293 /* All is fine, push the actual value. */
7294 pFpuCtx->FTW |= RT_BIT(iNewTop);
7295 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7296 }
7297 else if (pFpuCtx->FCW & X86_FCW_IM)
7298 {
7299 /* Masked stack overflow, push QNaN. */
7300 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7301 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7302 }
7303 else
7304 {
7305 /* Raise stack overflow, don't push anything. */
7306 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7307 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7308 return;
7309 }
7310
7311 fFsw &= ~X86_FSW_TOP_MASK;
7312 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7313 pFpuCtx->FSW = fFsw;
7314
7315 iemFpuRotateStackPush(pFpuCtx);
7316}
7317
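/*
 * Sketch (not compiled) of the TOP arithmetic used by iemFpuMaybePushResult()
 * above: a push decrements the 3-bit TOP field modulo 8, which the code
 * expresses as "+7 mod 8".
 */
#if 0
static uint16_t iemSketchFswAfterPush(uint16_t fFsw)
{
    uint16_t const iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;   /* TOP - 1 (mod 8) */
    return (uint16_t)((fFsw & ~X86_FSW_TOP_MASK) | (iNewTop << X86_FSW_TOP_SHIFT));
}
#endif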
7318
7319/**
7320 * Stores a result in a FPU register and updates the FSW and FTW.
7321 *
7322 * @param pFpuCtx The FPU context.
7323 * @param pResult The result to store.
7324 * @param iStReg Which FPU register to store it in.
7325 */
7326IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7327{
7328 Assert(iStReg < 8);
7329 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7330 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7331 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7332 pFpuCtx->FTW |= RT_BIT(iReg);
7333 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7334}
7335
7336
7337/**
7338 * Only updates the FPU status word (FSW) with the result of the current
7339 * instruction.
7340 *
7341 * @param pFpuCtx The FPU context.
7342 * @param u16FSW The FSW output of the current instruction.
7343 */
7344IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7345{
7346 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7347 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7348}
7349
7350
7351/**
7352 * Pops one item off the FPU stack if no pending exception prevents it.
7353 *
7354 * @param pFpuCtx The FPU context.
7355 */
7356IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7357{
7358 /* Check pending exceptions. */
7359 uint16_t uFSW = pFpuCtx->FSW;
7360 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7361 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7362 return;
7363
7364 /* TOP--. */
7365 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7366 uFSW &= ~X86_FSW_TOP_MASK;
7367 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7368 pFpuCtx->FSW = uFSW;
7369
7370 /* Mark the previous ST0 as empty. */
7371 iOldTop >>= X86_FSW_TOP_SHIFT;
7372 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7373
7374 /* Rotate the registers. */
7375 iemFpuRotateStackPop(pFpuCtx);
7376}
7377
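/*
 * Sketch (not compiled): the "+ (9 << X86_FSW_TOP_SHIFT)" in
 * iemFpuMaybePopOne() above is simply TOP + 1 modulo 8 performed on the
 * in-place TOP field; 9 and 1 are congruent modulo 8 and the mask discards
 * any carry into bit 14.
 */
#if 0
static uint16_t iemSketchFswAfterPop(uint16_t fFsw)
{
    uint16_t const uTop = ((fFsw & X86_FSW_TOP_MASK) + (UINT16_C(1) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    return (uint16_t)((fFsw & ~X86_FSW_TOP_MASK) | uTop);
}
#endif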
7378
7379/**
7380 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7381 *
7382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7383 * @param pResult The FPU operation result to push.
7384 */
7385IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7388 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7389 iemFpuMaybePushResult(pResult, pFpuCtx);
7390}
7391
7392
7393/**
7394 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7395 * and sets FPUDP and FPUDS.
7396 *
7397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7398 * @param pResult The FPU operation result to push.
7399 * @param iEffSeg The effective segment register.
7400 * @param GCPtrEff The effective address relative to @a iEffSeg.
7401 */
7402IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7403{
7404 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7405 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7406 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7407 iemFpuMaybePushResult(pResult, pFpuCtx);
7408}
7409
7410
7411/**
7412 * Replace ST0 with the first value and push the second onto the FPU stack,
7413 * unless a pending exception prevents it.
7414 *
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param pResult The FPU operation result to store and push.
7417 */
7418IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7419{
7420 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7421 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7422
7423 /* Update FSW and bail if there are pending exceptions afterwards. */
7424 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7425 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7426 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7427 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7428 {
7429 pFpuCtx->FSW = fFsw;
7430 return;
7431 }
7432
7433 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7434 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7435 {
7436 /* All is fine, push the actual value. */
7437 pFpuCtx->FTW |= RT_BIT(iNewTop);
7438 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7439 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7440 }
7441 else if (pFpuCtx->FCW & X86_FCW_IM)
7442 {
7443 /* Masked stack overflow, push QNaN. */
7444 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7445 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7446 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7447 }
7448 else
7449 {
7450 /* Raise stack overflow, don't push anything. */
7451 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7452 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7453 return;
7454 }
7455
7456 fFsw &= ~X86_FSW_TOP_MASK;
7457 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7458 pFpuCtx->FSW = fFsw;
7459
7460 iemFpuRotateStackPush(pFpuCtx);
7461}
7462
7463
7464/**
7465 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7466 * FOP.
7467 *
7468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7469 * @param pResult The result to store.
7470 * @param iStReg Which FPU register to store it in.
7471 */
7472IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7473{
7474 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7476 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7477}
7478
7479
7480/**
7481 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7482 * FOP, and then pops the stack.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param pResult The result to store.
7486 * @param iStReg Which FPU register to store it in.
7487 */
7488IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7489{
7490 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7491 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7492 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7493 iemFpuMaybePopOne(pFpuCtx);
7494}
7495
7496
7497/**
7498 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7499 * FPUDP, and FPUDS.
7500 *
7501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7502 * @param pResult The result to store.
7503 * @param iStReg Which FPU register to store it in.
7504 * @param iEffSeg The effective memory operand selector register.
7505 * @param GCPtrEff The effective memory operand offset.
7506 */
7507IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7508 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7509{
7510 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7511 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7512 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7513 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7514}
7515
7516
7517/**
7518 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7519 * FPUDP, and FPUDS, and then pops the stack.
7520 *
7521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7522 * @param pResult The result to store.
7523 * @param iStReg Which FPU register to store it in.
7524 * @param iEffSeg The effective memory operand selector register.
7525 * @param GCPtrEff The effective memory operand offset.
7526 */
7527IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7528 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7529{
7530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7531 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7532 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7533 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7534 iemFpuMaybePopOne(pFpuCtx);
7535}
7536
7537
7538/**
7539 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7540 *
7541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7542 */
7543IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7544{
7545 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7546 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7547}
7548
7549
7550/**
7551 * Marks the specified stack register as free (for FFREE).
7552 *
7553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7554 * @param iStReg The register to free.
7555 */
7556IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7557{
7558 Assert(iStReg < 8);
7559 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7560 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7561 pFpuCtx->FTW &= ~RT_BIT(iReg);
7562}
7563
7564
7565/**
7566 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7567 *
7568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7569 */
7570IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7571{
7572 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7573 uint16_t uFsw = pFpuCtx->FSW;
7574 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7575 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7576 uFsw &= ~X86_FSW_TOP_MASK;
7577 uFsw |= uTop;
7578 pFpuCtx->FSW = uFsw;
7579}
7580
7581
7582/**
7583 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7584 *
7585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7586 */
7587IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7588{
7589 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7590 uint16_t uFsw = pFpuCtx->FSW;
7591 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7592 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7593 uFsw &= ~X86_FSW_TOP_MASK;
7594 uFsw |= uTop;
7595 pFpuCtx->FSW = uFsw;
7596}
7597
7598
7599/**
7600 * Updates the FSW, FOP, FPUIP, and FPUCS.
7601 *
7602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7603 * @param u16FSW The FSW from the current instruction.
7604 */
7605IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7606{
7607 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7608 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7609 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7610}
7611
7612
7613/**
7614 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7615 *
7616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7617 * @param u16FSW The FSW from the current instruction.
7618 */
7619IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7620{
7621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7623 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7624 iemFpuMaybePopOne(pFpuCtx);
7625}
7626
7627
7628/**
7629 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7630 *
7631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7632 * @param u16FSW The FSW from the current instruction.
7633 * @param iEffSeg The effective memory operand selector register.
7634 * @param GCPtrEff The effective memory operand offset.
7635 */
7636IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7637{
7638 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7639 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7640 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7641 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7642}
7643
7644
7645/**
7646 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7647 *
7648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7649 * @param u16FSW The FSW from the current instruction.
7650 */
7651IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7652{
7653 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7654 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7655 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7656 iemFpuMaybePopOne(pFpuCtx);
7657 iemFpuMaybePopOne(pFpuCtx);
7658}
7659
7660
7661/**
7662 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7663 *
7664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7665 * @param u16FSW The FSW from the current instruction.
7666 * @param iEffSeg The effective memory operand selector register.
7667 * @param GCPtrEff The effective memory operand offset.
7668 */
7669IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7670{
7671 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7672 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7674 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7675 iemFpuMaybePopOne(pFpuCtx);
7676}
7677
7678
7679/**
7680 * Worker routine for raising an FPU stack underflow exception.
7681 *
7682 * @param pFpuCtx The FPU context.
7683 * @param iStReg The stack register being accessed.
7684 */
7685IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7686{
7687 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7688 if (pFpuCtx->FCW & X86_FCW_IM)
7689 {
7690 /* Masked underflow. */
7691 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7692 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7693 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7694 if (iStReg != UINT8_MAX)
7695 {
7696 pFpuCtx->FTW |= RT_BIT(iReg);
7697 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7698 }
7699 }
7700 else
7701 {
7702 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7703 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7704 }
7705}
7706
7707
7708/**
7709 * Raises a FPU stack underflow exception.
7710 *
7711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7712 * @param iStReg The destination register that should be loaded
7713 * with QNaN if \#IS is not masked. Specify
7714 * UINT8_MAX if none (like for fcom).
7715 */
7716DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7717{
7718 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7719 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7720 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7721}
7722
7723
7724DECL_NO_INLINE(IEM_STATIC, void)
7725iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7726{
7727 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7728 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7729 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7730 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7731}
7732
7733
7734DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7735{
7736 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7737 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7738 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7739 iemFpuMaybePopOne(pFpuCtx);
7740}
7741
7742
7743DECL_NO_INLINE(IEM_STATIC, void)
7744iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7745{
7746 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7747 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7748 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7749 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7750 iemFpuMaybePopOne(pFpuCtx);
7751}
7752
7753
7754DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7755{
7756 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7758 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7759 iemFpuMaybePopOne(pFpuCtx);
7760 iemFpuMaybePopOne(pFpuCtx);
7761}
7762
7763
7764DECL_NO_INLINE(IEM_STATIC, void)
7765iemFpuStackPushUnderflow(PVMCPU pVCpu)
7766{
7767 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7768 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7769
7770 if (pFpuCtx->FCW & X86_FCW_IM)
7771 {
7772 /* Masked underflow - Push QNaN. */
7773 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7774 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7775 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7776 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7777 pFpuCtx->FTW |= RT_BIT(iNewTop);
7778 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7779 iemFpuRotateStackPush(pFpuCtx);
7780 }
7781 else
7782 {
7783 /* Exception pending - don't change TOP or the register stack. */
7784 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7785 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7786 }
7787}
7788
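/*
 * Illustrative sketch: the "(TOP + 7) & 7" idiom used by the push paths above
 * is just TOP - 1 modulo 8, i.e. a push decrements TOP without risking
 * unsigned underflow.  The helper below is hypothetical.
 * @code
 *  static unsigned ExampleTopAfterPush(unsigned iTop)
 *  {
 *      return (iTop + 7) & 7;      // old TOP 0 -> 7, 3 -> 2, 7 -> 6
 *  }
 * @endcode
 */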
7789
7790DECL_NO_INLINE(IEM_STATIC, void)
7791iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7792{
7793 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7794 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7795
7796 if (pFpuCtx->FCW & X86_FCW_IM)
7797 {
7798 /* Masked underflow - Push QNaN. */
7799 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7800 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7801 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7802 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7803 pFpuCtx->FTW |= RT_BIT(iNewTop);
7804 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7805 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7806 iemFpuRotateStackPush(pFpuCtx);
7807 }
7808 else
7809 {
7810 /* Exception pending - don't change TOP or the register stack. */
7811 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7812 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7813 }
7814}
7815
7816
7817/**
7818 * Worker routine for raising an FPU stack overflow exception on a push.
7819 *
7820 * @param pFpuCtx The FPU context.
7821 */
7822IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7823{
7824 if (pFpuCtx->FCW & X86_FCW_IM)
7825 {
7826 /* Masked overflow. */
7827 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7828 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7829 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7830 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7831 pFpuCtx->FTW |= RT_BIT(iNewTop);
7832 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7833 iemFpuRotateStackPush(pFpuCtx);
7834 }
7835 else
7836 {
7837 /* Exception pending - don't change TOP or the register stack. */
7838 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7839 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7840 }
7841}
7842
7843
7844/**
7845 * Raises an FPU stack overflow exception on a push.
7846 *
7847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7848 */
7849DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7850{
7851 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7852 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7853 iemFpuStackPushOverflowOnly(pFpuCtx);
7854}
7855
7856
7857/**
7858 * Raises an FPU stack overflow exception on a push with a memory operand.
7859 *
7860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7861 * @param iEffSeg The effective memory operand selector register.
7862 * @param GCPtrEff The effective memory operand offset.
7863 */
7864DECL_NO_INLINE(IEM_STATIC, void)
7865iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7866{
7867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7868 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7869 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7870 iemFpuStackPushOverflowOnly(pFpuCtx);
7871}
7872
7873
7874IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7875{
7876 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7877 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7878 if (pFpuCtx->FTW & RT_BIT(iReg))
7879 return VINF_SUCCESS;
7880 return VERR_NOT_FOUND;
7881}
7882
7883
7884IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7885{
7886 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7887 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7888 if (pFpuCtx->FTW & RT_BIT(iReg))
7889 {
7890 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7891 return VINF_SUCCESS;
7892 }
7893 return VERR_NOT_FOUND;
7894}
7895
7896
7897IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7898 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7899{
7900 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7901 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7902 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7903 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7904 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7905 {
7906 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7907 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7908 return VINF_SUCCESS;
7909 }
7910 return VERR_NOT_FOUND;
7911}
7912
7913
7914IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7915{
7916 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7917 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7918 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7919 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7920 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7921 {
7922 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7923 return VINF_SUCCESS;
7924 }
7925 return VERR_NOT_FOUND;
7926}
7927
7928
7929/**
7930 * Updates the FPU exception status after FCW is changed.
7931 *
7932 * @param pFpuCtx The FPU context.
7933 */
7934IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7935{
7936 uint16_t u16Fsw = pFpuCtx->FSW;
7937 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7938 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7939 else
7940 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7941 pFpuCtx->FSW = u16Fsw;
7942}
7943
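/*
 * Illustrative sketch: the summary-bit rule implemented by
 * iemFpuRecalcExceptionStatus, spelled out with the architectural bit
 * positions (exception flags in bits 0..5 of both FSW and FCW, ES = bit 7,
 * B = bit 15).  The helper is hypothetical and simplified; the real code uses
 * the X86_FSW_XCPT_MASK / X86_FCW_XCPT_MASK definitions instead of 0x3f.
 * @code
 *  static uint16_t ExampleRecalcEsB(uint16_t fsw, uint16_t fcw)
 *  {
 *      if (fsw & 0x3f & ~(fcw & 0x3f))     // any pending exception left unmasked?
 *          fsw |= UINT16_C(0x8080);        //   yes: set B (bit 15) and ES (bit 7)
 *      else
 *          fsw &= ~UINT16_C(0x8080);       //   no:  clear both
 *      return fsw;
 *  }
 * @endcode
 */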
7944
7945/**
7946 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7947 *
7948 * @returns The full FTW.
7949 * @param pFpuCtx The FPU context.
7950 */
7951IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7952{
7953 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7954 uint16_t u16Ftw = 0;
7955 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7956 for (unsigned iSt = 0; iSt < 8; iSt++)
7957 {
7958 unsigned const iReg = (iSt + iTop) & 7;
7959 if (!(u8Ftw & RT_BIT(iReg)))
7960 u16Ftw |= 3 << (iReg * 2); /* empty */
7961 else
7962 {
7963 uint16_t uTag;
7964 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7965 if (pr80Reg->s.uExponent == 0x7fff)
7966 uTag = 2; /* Exponent is all 1's => Special. */
7967 else if (pr80Reg->s.uExponent == 0x0000)
7968 {
7969 if (pr80Reg->s.u64Mantissa == 0x0000)
7970 uTag = 1; /* All bits are zero => Zero. */
7971 else
7972 uTag = 2; /* Must be special. */
7973 }
7974 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7975 uTag = 0; /* Valid. */
7976 else
7977 uTag = 2; /* Must be special. */
7978
7979 u16Ftw |= uTag << (iReg * 2);
7980 }
7981 }
7982
7983 return u16Ftw;
7984}
7985
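/*
 * Illustrative sketch: the per-register tag classification performed by
 * iemFpuCalcFullFtw above, restated as a hypothetical standalone helper taking
 * the raw exponent and mantissa instead of a PCRTFLOAT80U.
 * @code
 *  static unsigned ExampleClassifyTag(uint16_t uExponent, uint64_t u64Mantissa)
 *  {
 *      if (uExponent == 0x7fff)
 *          return 2;                               // all-ones exponent => special
 *      if (uExponent == 0)
 *          return u64Mantissa == 0 ? 1 : 2;        // zero, else denormal => special
 *      if (u64Mantissa & UINT64_C(0x8000000000000000))
 *          return 0;                               // J bit set => valid
 *      return 2;                                   // unnormal => special
 *  }
 *
 *  // ExampleClassifyTag(0x3fff, UINT64_C(0x8000000000000000)) == 0  (1.0  => valid)
 *  // ExampleClassifyTag(0x0000, 0)                            == 1  (+0.0 => zero)
 * @endcode
 */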
7986
7987/**
7988 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7989 *
7990 * @returns The compressed FTW.
7991 * @param u16FullFtw The full FTW to convert.
7992 */
7993IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7994{
7995 uint8_t u8Ftw = 0;
7996 for (unsigned i = 0; i < 8; i++)
7997 {
7998 if ((u16FullFtw & 3) != 3 /*empty*/)
7999 u8Ftw |= RT_BIT(i);
8000 u16FullFtw >>= 2;
8001 }
8002
8003 return u8Ftw;
8004}
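
/*
 * Illustrative sketch: how the two tag word encodings handled above relate.
 * The full FTW uses two bits per register (11b = empty) while the compressed
 * form keeps one "not empty" bit per register; pFpuCtx is assumed to be a
 * valid PCX86FXSTATE here.
 * @code
 *  uint16_t const u16Full = iemFpuCalcFullFtw(pFpuCtx);   // e.g. 0xfffd: regs 1..7 empty, reg 0 tagged zero
 *  uint16_t const u16Abrd = iemFpuCompressFtw(u16Full);   // -> 0x0001: only register 0 marked used
 * @endcode
 * The compression is lossy (valid/zero/special all collapse to "not empty");
 * the detailed tags are recomputed from the register contents by
 * iemFpuCalcFullFtw when FNSTENV/FNSAVE need them.
 */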
8005
8006/** @} */
8007
8008
8009/** @name Memory access.
8010 *
8011 * @{
8012 */
8013
8014
8015/**
8016 * Updates the IEMCPU::cbWritten counter if applicable.
8017 *
8018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8019 * @param fAccess The access being accounted for.
8020 * @param cbMem The access size.
8021 */
8022DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8023{
8024 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8025 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8026 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8027}
8028
8029
8030/**
8031 * Checks if the given segment can be written to, raising the appropriate
8032 * exception if not.
8033 *
8034 * @returns VBox strict status code.
8035 *
8036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8037 * @param pHid Pointer to the hidden register.
8038 * @param iSegReg The register number.
8039 * @param pu64BaseAddr Where to return the base address to use for the
8040 * segment. (In 64-bit code it may differ from the
8041 * base in the hidden segment.)
8042 */
8043IEM_STATIC VBOXSTRICTRC
8044iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8045{
8046 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8047
8048 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8049 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8050 else
8051 {
8052 if (!pHid->Attr.n.u1Present)
8053 {
8054 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8055 AssertRelease(uSel == 0);
8056 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8057 return iemRaiseGeneralProtectionFault0(pVCpu);
8058 }
8059
8060 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8061 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8062 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8063 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8064 *pu64BaseAddr = pHid->u64Base;
8065 }
8066 return VINF_SUCCESS;
8067}
8068
8069
8070/**
8071 * Checks if the given segment can be read from, raising the appropriate
8072 * exception if not.
8073 *
8074 * @returns VBox strict status code.
8075 *
8076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8077 * @param pHid Pointer to the hidden register.
8078 * @param iSegReg The register number.
8079 * @param pu64BaseAddr Where to return the base address to use for the
8080 * segment. (In 64-bit code it may differ from the
8081 * base in the hidden segment.)
8082 */
8083IEM_STATIC VBOXSTRICTRC
8084iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8085{
8086 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8087
8088 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8089 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8090 else
8091 {
8092 if (!pHid->Attr.n.u1Present)
8093 {
8094 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8095 AssertRelease(uSel == 0);
8096 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8097 return iemRaiseGeneralProtectionFault0(pVCpu);
8098 }
8099
8100 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8101 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8102 *pu64BaseAddr = pHid->u64Base;
8103 }
8104 return VINF_SUCCESS;
8105}
8106
8107
8108/**
8109 * Applies the segment limit, base and attributes.
8110 *
8111 * This may raise a \#GP or \#SS.
8112 *
8113 * @returns VBox strict status code.
8114 *
8115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8116 * @param fAccess The kind of access which is being performed.
8117 * @param iSegReg The index of the segment register to apply.
8118 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8119 * TSS, ++).
8120 * @param cbMem The access size.
8121 * @param pGCPtrMem Pointer to the guest memory address to apply
8122 * segmentation to. Input and output parameter.
8123 */
8124IEM_STATIC VBOXSTRICTRC
8125iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8126{
8127 if (iSegReg == UINT8_MAX)
8128 return VINF_SUCCESS;
8129
8130 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8131 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8132 switch (pVCpu->iem.s.enmCpuMode)
8133 {
8134 case IEMMODE_16BIT:
8135 case IEMMODE_32BIT:
8136 {
8137 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8138 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8139
8140 if ( pSel->Attr.n.u1Present
8141 && !pSel->Attr.n.u1Unusable)
8142 {
8143 Assert(pSel->Attr.n.u1DescType);
8144 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8145 {
8146 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8147 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8148 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8149
8150 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8151 {
8152 /** @todo CPL check. */
8153 }
8154
8155 /*
8156 * There are two kinds of data selectors, normal and expand down.
8157 */
8158 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8159 {
8160 if ( GCPtrFirst32 > pSel->u32Limit
8161 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8162 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8163 }
8164 else
8165 {
8166 /*
8167 * The upper boundary is defined by the B bit, not the G bit!
8168 */
8169 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8170 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8171 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8172 }
8173 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8174 }
8175 else
8176 {
8177
8178 /*
8179 * A code selector can usually be used to read through it; writing is
8180 * only permitted in real and V8086 mode.
8181 */
8182 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8183 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8184 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8185 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8186 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8187
8188 if ( GCPtrFirst32 > pSel->u32Limit
8189 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8190 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8191
8192 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8193 {
8194 /** @todo CPL check. */
8195 }
8196
8197 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8198 }
8199 }
8200 else
8201 return iemRaiseGeneralProtectionFault0(pVCpu);
8202 return VINF_SUCCESS;
8203 }
8204
8205 case IEMMODE_64BIT:
8206 {
8207 RTGCPTR GCPtrMem = *pGCPtrMem;
8208 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8209 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8210
8211 Assert(cbMem >= 1);
8212 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8213 return VINF_SUCCESS;
8214 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8215 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8216 return iemRaiseGeneralProtectionFault0(pVCpu);
8217 }
8218
8219 default:
8220 AssertFailedReturn(VERR_IEM_IPE_7);
8221 }
8222}
8223
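/*
 * Illustrative sketch: how a caller applies segmentation to an effective
 * address before translating it, mirroring the call made by iemMemMap further
 * down.  GCPtrMem, the segment register and the access type are arbitrary
 * placeholders.
 * @code
 *  RTGCPTR GCPtrEff = GCPtrMem;                // effective address from the decoded operand
 *  VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                             sizeof(uint32_t), &GCPtrEff);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;                        // propagates the #GP/#SS status
 *  // GCPtrEff now has the segment base added and the limits checked.
 * @endcode
 */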
8224
8225/**
8226 * Translates a virtual address to a physical address and checks whether we
8227 * can access the page as specified.
8228 *
8229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8230 * @param GCPtrMem The virtual address.
8231 * @param fAccess The intended access.
8232 * @param pGCPhysMem Where to return the physical address.
8233 */
8234IEM_STATIC VBOXSTRICTRC
8235iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8236{
8237 /** @todo Need a different PGM interface here. We're currently using
8238 * generic / REM interfaces. this won't cut it for R0 & RC. */
8239 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8240 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8241 RTGCPHYS GCPhys;
8242 uint64_t fFlags;
8243 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8244 if (RT_FAILURE(rc))
8245 {
8246 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8247 /** @todo Check unassigned memory in unpaged mode. */
8248 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8249 *pGCPhysMem = NIL_RTGCPHYS;
8250 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8251 }
8252
8253 /* If the page is writable and does not have the no-exec bit set, all
8254 access is allowed. Otherwise we'll have to check more carefully... */
8255 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8256 {
8257 /* Write to read only memory? */
8258 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8259 && !(fFlags & X86_PTE_RW)
8260 && ( (pVCpu->iem.s.uCpl == 3
8261 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8262 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8263 {
8264 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8265 *pGCPhysMem = NIL_RTGCPHYS;
8266 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8267 }
8268
8269 /* Kernel memory accessed by userland? */
8270 if ( !(fFlags & X86_PTE_US)
8271 && pVCpu->iem.s.uCpl == 3
8272 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8273 {
8274 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8275 *pGCPhysMem = NIL_RTGCPHYS;
8276 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8277 }
8278
8279 /* Executing non-executable memory? */
8280 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8281 && (fFlags & X86_PTE_PAE_NX)
8282 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8283 {
8284 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8285 *pGCPhysMem = NIL_RTGCPHYS;
8286 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8287 VERR_ACCESS_DENIED);
8288 }
8289 }
8290
8291 /*
8292 * Set the dirty / access flags.
8293 * ASSUMES this is set when the address is translated rather than on commit...
8294 */
8295 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8296 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8297 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8298 {
8299 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8300 AssertRC(rc2);
8301 }
8302
8303 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8304 *pGCPhysMem = GCPhys;
8305 return VINF_SUCCESS;
8306}
8307
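/*
 * Illustrative sketch: the write-protection rule checked above for pages
 * without X86_PTE_RW, as a hypothetical standalone predicate.  A write to a
 * read-only page faults when it comes from CPL 3 and is not a system access
 * (descriptor table, TSS and similar), or unconditionally once CR0.WP is set.
 * @code
 *  static bool ExampleWriteFaultsOnRoPage(unsigned uCpl, bool fSysAccess, bool fCr0Wp)
 *  {
 *      return (uCpl == 3 && !fSysAccess) || fCr0Wp;
 *  }
 * @endcode
 */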
8308
8309
8310/**
8311 * Maps a physical page.
8312 *
8313 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8315 * @param GCPhysMem The physical address.
8316 * @param fAccess The intended access.
8317 * @param ppvMem Where to return the mapping address.
8318 * @param pLock The PGM lock.
8319 */
8320IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8321{
8322#ifdef IEM_LOG_MEMORY_WRITES
8323 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8324 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8325#endif
8326
8327 /** @todo This API may require some improving later. A private deal with PGM
8328 * regarding locking and unlocking needs to be struck. A couple of TLBs
8329 * living in PGM, but with publicly accessible inlined access methods
8330 * could perhaps be an even better solution. */
8331 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8332 GCPhysMem,
8333 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8334 pVCpu->iem.s.fBypassHandlers,
8335 ppvMem,
8336 pLock);
8337 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8338 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8339
8340 return rc;
8341}
8342
8343
8344/**
8345 * Unmaps a page previously mapped by iemMemPageMap.
8346 *
8347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8348 * @param GCPhysMem The physical address.
8349 * @param fAccess The intended access.
8350 * @param pvMem What iemMemPageMap returned.
8351 * @param pLock The PGM lock.
8352 */
8353DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8354{
8355 NOREF(pVCpu);
8356 NOREF(GCPhysMem);
8357 NOREF(fAccess);
8358 NOREF(pvMem);
8359 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8360}
8361
8362
8363/**
8364 * Looks up a memory mapping entry.
8365 *
8366 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8368 * @param pvMem The memory address.
8369 * @param fAccess The access type and purpose to match.
8370 */
8371DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8372{
8373 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8374 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8375 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8376 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8377 return 0;
8378 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8379 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8380 return 1;
8381 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8382 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8383 return 2;
8384 return VERR_NOT_FOUND;
8385}
8386
8387
8388/**
8389 * Finds a free memmap entry when using iNextMapping doesn't work.
8390 *
8391 * @returns Memory mapping index, 1024 on failure.
8392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8393 */
8394IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8395{
8396 /*
8397 * The easy case.
8398 */
8399 if (pVCpu->iem.s.cActiveMappings == 0)
8400 {
8401 pVCpu->iem.s.iNextMapping = 1;
8402 return 0;
8403 }
8404
8405 /* There should be enough mappings for all instructions. */
8406 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8407
8408 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8409 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8410 return i;
8411
8412 AssertFailedReturn(1024);
8413}
8414
8415
8416/**
8417 * Commits a bounce buffer that needs writing back and unmaps it.
8418 *
8419 * @returns Strict VBox status code.
8420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8421 * @param iMemMap The index of the buffer to commit.
8422 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8423 * Always false in ring-3, obviously.
8424 */
8425IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8426{
8427 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8428 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8429#ifdef IN_RING3
8430 Assert(!fPostponeFail);
8431 RT_NOREF_PV(fPostponeFail);
8432#endif
8433
8434 /*
8435 * Do the writing.
8436 */
8437 PVM pVM = pVCpu->CTX_SUFF(pVM);
8438 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8439 {
8440 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8441 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8442 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8443 if (!pVCpu->iem.s.fBypassHandlers)
8444 {
8445 /*
8446 * Carefully and efficiently dealing with access handler return
8447 * codes makes this a little bloated.
8448 */
8449 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8451 pbBuf,
8452 cbFirst,
8453 PGMACCESSORIGIN_IEM);
8454 if (rcStrict == VINF_SUCCESS)
8455 {
8456 if (cbSecond)
8457 {
8458 rcStrict = PGMPhysWrite(pVM,
8459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8460 pbBuf + cbFirst,
8461 cbSecond,
8462 PGMACCESSORIGIN_IEM);
8463 if (rcStrict == VINF_SUCCESS)
8464 { /* nothing */ }
8465 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8466 {
8467 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8470 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8471 }
8472#ifndef IN_RING3
8473 else if (fPostponeFail)
8474 {
8475 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8478 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8479 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8480 return iemSetPassUpStatus(pVCpu, rcStrict);
8481 }
8482#endif
8483 else
8484 {
8485 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8488 return rcStrict;
8489 }
8490 }
8491 }
8492 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8493 {
8494 if (!cbSecond)
8495 {
8496 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8497 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8498 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8499 }
8500 else
8501 {
8502 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8504 pbBuf + cbFirst,
8505 cbSecond,
8506 PGMACCESSORIGIN_IEM);
8507 if (rcStrict2 == VINF_SUCCESS)
8508 {
8509 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8510 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8511 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8512 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8513 }
8514 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8515 {
8516 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8519 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8520 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8521 }
8522#ifndef IN_RING3
8523 else if (fPostponeFail)
8524 {
8525 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8527 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8528 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8529 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8530 return iemSetPassUpStatus(pVCpu, rcStrict);
8531 }
8532#endif
8533 else
8534 {
8535 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8538 return rcStrict2;
8539 }
8540 }
8541 }
8542#ifndef IN_RING3
8543 else if (fPostponeFail)
8544 {
8545 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8548 if (!cbSecond)
8549 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8550 else
8551 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8552 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8553 return iemSetPassUpStatus(pVCpu, rcStrict);
8554 }
8555#endif
8556 else
8557 {
8558 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8559 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8560 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8561 return rcStrict;
8562 }
8563 }
8564 else
8565 {
8566 /*
8567 * No access handlers, much simpler.
8568 */
8569 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8570 if (RT_SUCCESS(rc))
8571 {
8572 if (cbSecond)
8573 {
8574 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8575 if (RT_SUCCESS(rc))
8576 { /* likely */ }
8577 else
8578 {
8579 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8580 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8581 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8582 return rc;
8583 }
8584 }
8585 }
8586 else
8587 {
8588 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8590 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8591 return rc;
8592 }
8593 }
8594 }
8595
8596#if defined(IEM_LOG_MEMORY_WRITES)
8597 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8598 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8599 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8600 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8601 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8602 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8603
8604 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8605 g_cbIemWrote = cbWrote;
8606 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8607#endif
8608
8609 /*
8610 * Free the mapping entry.
8611 */
8612 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8613 Assert(pVCpu->iem.s.cActiveMappings != 0);
8614 pVCpu->iem.s.cActiveMappings--;
8615 return VINF_SUCCESS;
8616}
8617
8618
8619/**
8620 * iemMemMap worker that deals with a request crossing pages.
8621 */
8622IEM_STATIC VBOXSTRICTRC
8623iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8624{
8625 /*
8626 * Do the address translations.
8627 */
8628 RTGCPHYS GCPhysFirst;
8629 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8630 if (rcStrict != VINF_SUCCESS)
8631 return rcStrict;
8632
8633 RTGCPHYS GCPhysSecond;
8634 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8635 fAccess, &GCPhysSecond);
8636 if (rcStrict != VINF_SUCCESS)
8637 return rcStrict;
8638 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8639
8640 PVM pVM = pVCpu->CTX_SUFF(pVM);
8641
8642 /*
8643 * Read in the current memory content if it's a read, execute or partial
8644 * write access.
8645 */
8646 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8647 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8648 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8649
8650 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8651 {
8652 if (!pVCpu->iem.s.fBypassHandlers)
8653 {
8654 /*
8655 * Must carefully deal with access handler status codes here,
8656 * which makes the code a bit bloated.
8657 */
8658 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8659 if (rcStrict == VINF_SUCCESS)
8660 {
8661 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8662 if (rcStrict == VINF_SUCCESS)
8663 { /*likely */ }
8664 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8665 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8666 else
8667 {
8668 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8669 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8670 return rcStrict;
8671 }
8672 }
8673 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8674 {
8675 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8676 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8677 {
8678 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8679 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8680 }
8681 else
8682 {
8683 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8684 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8685 return rcStrict2;
8686 }
8687 }
8688 else
8689 {
8690 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8691 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8692 return rcStrict;
8693 }
8694 }
8695 else
8696 {
8697 /*
8698 * No informational status codes here, much more straightforward.
8699 */
8700 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8701 if (RT_SUCCESS(rc))
8702 {
8703 Assert(rc == VINF_SUCCESS);
8704 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8705 if (RT_SUCCESS(rc))
8706 Assert(rc == VINF_SUCCESS);
8707 else
8708 {
8709 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8710 return rc;
8711 }
8712 }
8713 else
8714 {
8715 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8716 return rc;
8717 }
8718 }
8719 }
8720#ifdef VBOX_STRICT
8721 else
8722 memset(pbBuf, 0xcc, cbMem);
8723 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8724 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8725#endif
8726
8727 /*
8728 * Commit the bounce buffer entry.
8729 */
8730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8731 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8734 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8735 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8736 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8737 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8738 pVCpu->iem.s.cActiveMappings++;
8739
8740 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8741 *ppvMem = pbBuf;
8742 return VINF_SUCCESS;
8743}
8744
8745
8746/**
8747 * iemMemMap worker that deals with iemMemPageMap failures.
8748 */
8749IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8750 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8751{
8752 /*
8753 * Filter out conditions we can handle and the ones which shouldn't happen.
8754 */
8755 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8756 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8757 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8758 {
8759 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8760 return rcMap;
8761 }
8762 pVCpu->iem.s.cPotentialExits++;
8763
8764 /*
8765 * Read in the current memory content if it's a read, execute or partial
8766 * write access.
8767 */
8768 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8769 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8770 {
8771 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8772 memset(pbBuf, 0xff, cbMem);
8773 else
8774 {
8775 int rc;
8776 if (!pVCpu->iem.s.fBypassHandlers)
8777 {
8778 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8779 if (rcStrict == VINF_SUCCESS)
8780 { /* nothing */ }
8781 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8782 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8783 else
8784 {
8785 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8786 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8787 return rcStrict;
8788 }
8789 }
8790 else
8791 {
8792 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8793 if (RT_SUCCESS(rc))
8794 { /* likely */ }
8795 else
8796 {
8797 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8798 GCPhysFirst, rc));
8799 return rc;
8800 }
8801 }
8802 }
8803 }
8804#ifdef VBOX_STRICT
8805 else
8806 memset(pbBuf, 0xcc, cbMem);
8809 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8810 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8811#endif
8812
8813 /*
8814 * Commit the bounce buffer entry.
8815 */
8816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8818 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8819 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8820 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8821 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8822 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8823 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8824 pVCpu->iem.s.cActiveMappings++;
8825
8826 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8827 *ppvMem = pbBuf;
8828 return VINF_SUCCESS;
8829}
8830
8831
8832
8833/**
8834 * Maps the specified guest memory for the given kind of access.
8835 *
8836 * This may be using bounce buffering of the memory if it's crossing a page
8837 * boundary or if there is an access handler installed for any of it. Because
8838 * of lock prefix guarantees, we're in for some extra clutter when this
8839 * happens.
8840 *
8841 * This may raise a \#GP, \#SS, \#PF or \#AC.
8842 *
8843 * @returns VBox strict status code.
8844 *
8845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8846 * @param ppvMem Where to return the pointer to the mapped
8847 * memory.
8848 * @param cbMem The number of bytes to map. This is usually 1,
8849 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8850 * string operations it can be up to a page.
8851 * @param iSegReg The index of the segment register to use for
8852 * this access. The base and limits are checked.
8853 * Use UINT8_MAX to indicate that no segmentation
8854 * is required (for IDT, GDT and LDT accesses).
8855 * @param GCPtrMem The address of the guest memory.
8856 * @param fAccess How the memory is being accessed. The
8857 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8858 * how to map the memory, while the
8859 * IEM_ACCESS_WHAT_XXX bit is used when raising
8860 * exceptions.
8861 */
8862IEM_STATIC VBOXSTRICTRC
8863iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8864{
8865 /*
8866 * Check the input and figure out which mapping entry to use.
8867 */
8868 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8869 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8870 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8871
8872 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8873 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8874 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8875 {
8876 iMemMap = iemMemMapFindFree(pVCpu);
8877 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8878 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8879 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8880 pVCpu->iem.s.aMemMappings[2].fAccess),
8881 VERR_IEM_IPE_9);
8882 }
8883
8884 /*
8885 * Map the memory, checking that we can actually access it. If something
8886 * slightly complicated happens, fall back on bounce buffering.
8887 */
8888 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8889 if (rcStrict != VINF_SUCCESS)
8890 return rcStrict;
8891
8892 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8893 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8894
8895 RTGCPHYS GCPhysFirst;
8896 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8897 if (rcStrict != VINF_SUCCESS)
8898 return rcStrict;
8899
8900 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8901 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8902 if (fAccess & IEM_ACCESS_TYPE_READ)
8903 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8904
8905 void *pvMem;
8906 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8907 if (rcStrict != VINF_SUCCESS)
8908 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8909
8910 /*
8911 * Fill in the mapping table entry.
8912 */
8913 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8914 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8915 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8916 pVCpu->iem.s.cActiveMappings++;
8917
8918 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8919 *ppvMem = pvMem;
8920
8921 return VINF_SUCCESS;
8922}
8923
8924
8925/**
8926 * Commits the guest memory if bounce buffered and unmaps it.
8927 *
8928 * @returns Strict VBox status code.
8929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8930 * @param pvMem The mapping.
8931 * @param fAccess The kind of access.
8932 */
8933IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8934{
8935 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8936 AssertReturn(iMemMap >= 0, iMemMap);
8937
8938 /* If it's bounce buffered, we may need to write back the buffer. */
8939 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8940 {
8941 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8942 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8943 }
8944 /* Otherwise unlock it. */
8945 else
8946 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8947
8948 /* Free the entry. */
8949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8950 Assert(pVCpu->iem.s.cActiveMappings != 0);
8951 pVCpu->iem.s.cActiveMappings--;
8952 return VINF_SUCCESS;
8953}
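
/*
 * Illustrative sketch: the canonical map / access / commit pattern built on
 * iemMemMap and iemMemCommitAndUnmap, here for a hypothetical 32-bit store
 * (the data fetch helpers further down use the same shape for reads).
 * iSegReg, GCPtrMem and u32Value are assumed to be in scope.
 * @code
 *  uint32_t *pu32Dst;
 *  VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                              iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *  if (rc == VINF_SUCCESS)
 *  {
 *      *pu32Dst = u32Value;
 *      rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *  }
 *  return rc;
 * @endcode
 */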
8954
8955#ifdef IEM_WITH_SETJMP
8956
8957/**
8958 * Maps the specified guest memory for the given kind of access, longjmp on
8959 * error.
8960 *
8961 * This may be using bounce buffering of the memory if it's crossing a page
8962 * boundary or if there is an access handler installed for any of it. Because
8963 * of lock prefix guarantees, we're in for some extra clutter when this
8964 * happens.
8965 *
8966 * This may raise a \#GP, \#SS, \#PF or \#AC.
8967 *
8968 * @returns Pointer to the mapped memory.
8969 *
8970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8971 * @param cbMem The number of bytes to map. This is usually 1,
8972 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8973 * string operations it can be up to a page.
8974 * @param iSegReg The index of the segment register to use for
8975 * this access. The base and limits are checked.
8976 * Use UINT8_MAX to indicate that no segmentation
8977 * is required (for IDT, GDT and LDT accesses).
8978 * @param GCPtrMem The address of the guest memory.
8979 * @param fAccess How the memory is being accessed. The
8980 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8981 * how to map the memory, while the
8982 * IEM_ACCESS_WHAT_XXX bit is used when raising
8983 * exceptions.
8984 */
8985IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8986{
8987 /*
8988 * Check the input and figure out which mapping entry to use.
8989 */
8990 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8991 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8992 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8993
8994 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8995 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8996 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8997 {
8998 iMemMap = iemMemMapFindFree(pVCpu);
8999 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9000 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9001 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9002 pVCpu->iem.s.aMemMappings[2].fAccess),
9003 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9004 }
9005
9006 /*
9007 * Map the memory, checking that we can actually access it. If something
9008 * slightly complicated happens, fall back on bounce buffering.
9009 */
9010 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9011 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9012 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9013
9014 /* Crossing a page boundary? */
9015 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9016 { /* No (likely). */ }
9017 else
9018 {
9019 void *pvMem;
9020 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9021 if (rcStrict == VINF_SUCCESS)
9022 return pvMem;
9023 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9024 }
9025
9026 RTGCPHYS GCPhysFirst;
9027 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9028 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9029 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9030
9031 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9032 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9033 if (fAccess & IEM_ACCESS_TYPE_READ)
9034 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9035
9036 void *pvMem;
9037 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9038 if (rcStrict == VINF_SUCCESS)
9039 { /* likely */ }
9040 else
9041 {
9042 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9043 if (rcStrict == VINF_SUCCESS)
9044 return pvMem;
9045 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9046 }
9047
9048 /*
9049 * Fill in the mapping table entry.
9050 */
9051 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9052 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9053 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9054 pVCpu->iem.s.cActiveMappings++;
9055
9056 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9057 return pvMem;
9058}
9059
9060
9061/**
9062 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9063 *
9064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9065 * @param pvMem The mapping.
9066 * @param fAccess The kind of access.
9067 */
9068IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9069{
9070 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9071 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9072
9073 /* If it's bounce buffered, we may need to write back the buffer. */
9074 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9075 {
9076 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9077 {
9078 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9079 if (rcStrict == VINF_SUCCESS)
9080 return;
9081 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9082 }
9083 }
9084 /* Otherwise unlock it. */
9085 else
9086 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9087
9088 /* Free the entry. */
9089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9090 Assert(pVCpu->iem.s.cActiveMappings != 0);
9091 pVCpu->iem.s.cActiveMappings--;
9092}
9093
9094#endif /* IEM_WITH_SETJMP */
9095
9096#ifndef IN_RING3
9097/**
9098 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9099 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
9100 *
9101 * Allows the instruction to be completed and retired, while the IEM user will
9102 * return to ring-3 immediately afterwards and do the postponed writes there.
9103 *
9104 * @returns VBox status code (no strict statuses). Caller must check
9105 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9107 * @param pvMem The mapping.
9108 * @param fAccess The kind of access.
9109 */
9110IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9111{
9112 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9113 AssertReturn(iMemMap >= 0, iMemMap);
9114
9115 /* If it's bounce buffered, we may need to write back the buffer. */
9116 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9117 {
9118 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9119 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9120 }
9121 /* Otherwise unlock it. */
9122 else
9123 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9124
9125 /* Free the entry. */
9126 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9127 Assert(pVCpu->iem.s.cActiveMappings != 0);
9128 pVCpu->iem.s.cActiveMappings--;
9129 return VINF_SUCCESS;
9130}
9131#endif
9132
9133
9134/**
9135 * Rolls back mappings, releasing page locks and such.
9136 *
9137 * The caller shall only call this after checking cActiveMappings.
9138 *
9140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9141 */
9142IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9143{
9144 Assert(pVCpu->iem.s.cActiveMappings > 0);
9145
9146 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9147 while (iMemMap-- > 0)
9148 {
9149 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9150 if (fAccess != IEM_ACCESS_INVALID)
9151 {
9152 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9153 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9154 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9155 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9156 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9157 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9158 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9159 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9160 pVCpu->iem.s.cActiveMappings--;
9161 }
9162 }
9163}
9164
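/*
 * Illustrative sketch: the calling pattern implied by the note above; callers
 * only invoke the rollback when mappings are still active after a failed or
 * aborted instruction.
 * @code
 *  if (pVCpu->iem.s.cActiveMappings > 0)
 *      iemMemRollback(pVCpu);
 * @endcode
 */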
9165
9166/**
9167 * Fetches a data byte.
9168 *
9169 * @returns Strict VBox status code.
9170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9171 * @param pu8Dst Where to return the byte.
9172 * @param iSegReg The index of the segment register to use for
9173 * this access. The base and limits are checked.
9174 * @param GCPtrMem The address of the guest memory.
9175 */
9176IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9177{
9178 /* The lazy approach for now... */
9179 uint8_t const *pu8Src;
9180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9181 if (rc == VINF_SUCCESS)
9182 {
9183 *pu8Dst = *pu8Src;
9184 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9185 }
9186 return rc;
9187}
9188
9189
9190#ifdef IEM_WITH_SETJMP
9191/**
9192 * Fetches a data byte, longjmp on error.
9193 *
9194 * @returns The byte.
9195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9196 * @param iSegReg The index of the segment register to use for
9197 * this access. The base and limits are checked.
9198 * @param GCPtrMem The address of the guest memory.
9199 */
9200DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9201{
9202 /* The lazy approach for now... */
9203 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9204 uint8_t const bRet = *pu8Src;
9205 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9206 return bRet;
9207}
9208#endif /* IEM_WITH_SETJMP */
9209
9210
9211/**
9212 * Fetches a data word.
9213 *
9214 * @returns Strict VBox status code.
9215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9216 * @param pu16Dst Where to return the word.
9217 * @param iSegReg The index of the segment register to use for
9218 * this access. The base and limits are checked.
9219 * @param GCPtrMem The address of the guest memory.
9220 */
9221IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9222{
9223 /* The lazy approach for now... */
9224 uint16_t const *pu16Src;
9225 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9226 if (rc == VINF_SUCCESS)
9227 {
9228 *pu16Dst = *pu16Src;
9229 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9230 }
9231 return rc;
9232}
9233
9234
9235#ifdef IEM_WITH_SETJMP
9236/**
9237 * Fetches a data word, longjmp on error.
9238 *
9239 * @returns The word.
9240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9241 * @param iSegReg The index of the segment register to use for
9242 * this access. The base and limits are checked.
9243 * @param GCPtrMem The address of the guest memory.
9244 */
9245DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9246{
9247 /* The lazy approach for now... */
9248 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9249 uint16_t const u16Ret = *pu16Src;
9250 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9251 return u16Ret;
9252}
9253#endif
9254
9255
9256/**
9257 * Fetches a data dword.
9258 *
9259 * @returns Strict VBox status code.
9260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9261 * @param pu32Dst Where to return the dword.
9262 * @param iSegReg The index of the segment register to use for
9263 * this access. The base and limits are checked.
9264 * @param GCPtrMem The address of the guest memory.
9265 */
9266IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9267{
9268 /* The lazy approach for now... */
9269 uint32_t const *pu32Src;
9270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9271 if (rc == VINF_SUCCESS)
9272 {
9273 *pu32Dst = *pu32Src;
9274 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9275 }
9276 return rc;
9277}
9278
9279
9280#ifdef IEM_WITH_SETJMP
9281
9282IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9283{
9284 Assert(cbMem >= 1);
9285 Assert(iSegReg < X86_SREG_COUNT);
9286
9287 /*
9288 * 64-bit mode is simpler.
9289 */
9290 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9291 {
9292 if (iSegReg >= X86_SREG_FS)
9293 {
9294 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9295 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9296 GCPtrMem += pSel->u64Base;
9297 }
9298
9299 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9300 return GCPtrMem;
9301 }
9302 /*
9303 * 16-bit and 32-bit segmentation.
9304 */
9305 else
9306 {
9307 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9308 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9309 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9310 == X86DESCATTR_P /* data, expand up */
9311 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9312 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9313 {
9314 /* expand up */
9315 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9316 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9317 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9318 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9319 }
9320 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9321 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9322 {
9323 /* expand down */
9324 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9325 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9326 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9327 && GCPtrLast32 > (uint32_t)GCPtrMem))
9328 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9329 }
9330 else
9331 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9332 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9333 }
9334 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9335}
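
/*
 * Worked example for the limit checks above (the values are assumed, purely
 * for illustration): with an expand-up data segment whose byte granular limit
 * is 0xffff, a dword read at offset 0xfffc is fine (the last byte 0xffff is
 * within the limit), while one at 0xfffd overshoots and ends up in
 * iemRaiseSelectorBoundsJmp.  With an expand-down data segment (limit 0x0fff,
 * D=1) the valid offsets are 0x1000..0xffffffff instead, so a dword read at
 * 0x0800 faults while one at 0x2000 succeeds.
 */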
9336
9337
9338IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9339{
9340 Assert(cbMem >= 1);
9341 Assert(iSegReg < X86_SREG_COUNT);
9342
9343 /*
9344 * 64-bit mode is simpler.
9345 */
9346 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9347 {
9348 if (iSegReg >= X86_SREG_FS)
9349 {
9350 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9351 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9352 GCPtrMem += pSel->u64Base;
9353 }
9354
9355 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9356 return GCPtrMem;
9357 }
9358 /*
9359 * 16-bit and 32-bit segmentation.
9360 */
9361 else
9362 {
9363 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9364 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9365 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9366 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9367 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9368 {
9369 /* expand up */
9370 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9371 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9372 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9373 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9374 }
9375 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9376 {
9377 /* expand down */
9378 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9379 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9380 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9381 && GCPtrLast32 > (uint32_t)GCPtrMem))
9382 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9383 }
9384 else
9385 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9386 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9387 }
9388 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9389}
9390
9391
9392/**
9393 * Fetches a data dword, longjmp on error, fallback/safe version.
9394 *
9395 * @returns The dword.
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9404 uint32_t const u32Ret = *pu32Src;
9405 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9406 return u32Ret;
9407}
9408
9409
9410/**
9411 * Fetches a data dword, longjmp on error.
9412 *
9413 * @returns The dword.
9414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9415 * @param iSegReg The index of the segment register to use for
9416 * this access. The base and limits are checked.
9417 * @param GCPtrMem The address of the guest memory.
9418 */
9419DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9420{
9421# ifdef IEM_WITH_DATA_TLB
9422 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9423 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9424 {
9425 /// @todo more later.
9426 }
9427
9428 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9429# else
9430 /* The lazy approach. */
9431 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9432 uint32_t const u32Ret = *pu32Src;
9433 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9434 return u32Ret;
9435# endif
9436}
9437#endif
9438
9439
9440#ifdef SOME_UNUSED_FUNCTION
9441/**
9442 * Fetches a data dword and sign extends it to a qword.
9443 *
9444 * @returns Strict VBox status code.
9445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9446 * @param pu64Dst Where to return the sign extended value.
9447 * @param iSegReg The index of the segment register to use for
9448 * this access. The base and limits are checked.
9449 * @param GCPtrMem The address of the guest memory.
9450 */
9451IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9452{
9453 /* The lazy approach for now... */
9454 int32_t const *pi32Src;
9455 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9456 if (rc == VINF_SUCCESS)
9457 {
9458 *pu64Dst = *pi32Src;
9459 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9460 }
9461#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9462 else
9463 *pu64Dst = 0;
9464#endif
9465 return rc;
9466}
9467#endif
9468
9469
9470/**
9471 * Fetches a data qword.
9472 *
9473 * @returns Strict VBox status code.
9474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9475 * @param pu64Dst Where to return the qword.
9476 * @param iSegReg The index of the segment register to use for
9477 * this access. The base and limits are checked.
9478 * @param GCPtrMem The address of the guest memory.
9479 */
9480IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9481{
9482 /* The lazy approach for now... */
9483 uint64_t const *pu64Src;
9484 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9485 if (rc == VINF_SUCCESS)
9486 {
9487 *pu64Dst = *pu64Src;
9488 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9489 }
9490 return rc;
9491}
9492
9493
9494#ifdef IEM_WITH_SETJMP
9495/**
9496 * Fetches a data qword, longjmp on error.
9497 *
9498 * @returns The qword.
9499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9500 * @param iSegReg The index of the segment register to use for
9501 * this access. The base and limits are checked.
9502 * @param GCPtrMem The address of the guest memory.
9503 */
9504DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9505{
9506 /* The lazy approach for now... */
9507 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9508 uint64_t const u64Ret = *pu64Src;
9509 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9510 return u64Ret;
9511}
9512#endif
9513
9514
9515/**
9516 * Fetches a data qword, aligned at a 16-byte boundary (for SSE).
9517 *
9518 * @returns Strict VBox status code.
9519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9520 * @param pu64Dst Where to return the qword.
9521 * @param iSegReg The index of the segment register to use for
9522 * this access. The base and limits are checked.
9523 * @param GCPtrMem The address of the guest memory.
9524 */
9525IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9526{
9527 /* The lazy approach for now... */
9528 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9529 if (RT_UNLIKELY(GCPtrMem & 15))
9530 return iemRaiseGeneralProtectionFault0(pVCpu);
9531
9532 uint64_t const *pu64Src;
9533 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9534 if (rc == VINF_SUCCESS)
9535 {
9536 *pu64Dst = *pu64Src;
9537 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9538 }
9539 return rc;
9540}
9541
9542
9543#ifdef IEM_WITH_SETJMP
9544/**
9545 * Fetches a data qword, aligned at a 16-byte boundary (for SSE), longjmp on error.
9546 *
9547 * @returns The qword.
9548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9549 * @param iSegReg The index of the segment register to use for
9550 * this access. The base and limits are checked.
9551 * @param GCPtrMem The address of the guest memory.
9552 */
9553DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9554{
9555 /* The lazy approach for now... */
9556 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9557 if (RT_LIKELY(!(GCPtrMem & 15)))
9558 {
9559 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9560 uint64_t const u64Ret = *pu64Src;
9561 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9562 return u64Ret;
9563 }
9564
9565 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9566 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9567}
9568#endif
9569
9570
9571/**
9572 * Fetches a data tword.
9573 *
9574 * @returns Strict VBox status code.
9575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9576 * @param pr80Dst Where to return the tword.
9577 * @param iSegReg The index of the segment register to use for
9578 * this access. The base and limits are checked.
9579 * @param GCPtrMem The address of the guest memory.
9580 */
9581IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9582{
9583 /* The lazy approach for now... */
9584 PCRTFLOAT80U pr80Src;
9585 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9586 if (rc == VINF_SUCCESS)
9587 {
9588 *pr80Dst = *pr80Src;
9589 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9590 }
9591 return rc;
9592}
9593
9594
9595#ifdef IEM_WITH_SETJMP
9596/**
9597 * Fetches a data tword, longjmp on error.
9598 *
9599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9600 * @param pr80Dst Where to return the tword.
9601 * @param iSegReg The index of the segment register to use for
9602 * this access. The base and limits are checked.
9603 * @param GCPtrMem The address of the guest memory.
9604 */
9605DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9606{
9607 /* The lazy approach for now... */
9608 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9609 *pr80Dst = *pr80Src;
9610 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9611}
9612#endif
9613
9614
9615/**
9616 * Fetches a data dqword (double qword), generally SSE related.
9617 *
9618 * @returns Strict VBox status code.
9619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9620 * @param pu128Dst Where to return the dqword.
9621 * @param iSegReg The index of the segment register to use for
9622 * this access. The base and limits are checked.
9623 * @param GCPtrMem The address of the guest memory.
9624 */
9625IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9626{
9627 /* The lazy approach for now... */
9628 PCRTUINT128U pu128Src;
9629 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9630 if (rc == VINF_SUCCESS)
9631 {
9632 pu128Dst->au64[0] = pu128Src->au64[0];
9633 pu128Dst->au64[1] = pu128Src->au64[1];
9634 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9635 }
9636 return rc;
9637}
9638
9639
9640#ifdef IEM_WITH_SETJMP
9641/**
9642 * Fetches a data dqword (double qword), generally SSE related.
9643 *
9644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9645 * @param pu128Dst Where to return the dqword.
9646 * @param iSegReg The index of the segment register to use for
9647 * this access. The base and limits are checked.
9648 * @param GCPtrMem The address of the guest memory.
9649 */
9650IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9651{
9652 /* The lazy approach for now... */
9653 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9654 pu128Dst->au64[0] = pu128Src->au64[0];
9655 pu128Dst->au64[1] = pu128Src->au64[1];
9656 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9657}
9658#endif
9659
9660
9661/**
9662 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9663 * related.
9664 *
9665 * Raises \#GP(0) if not aligned.
9666 *
9667 * @returns Strict VBox status code.
9668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9669 * @param pu128Dst Where to return the dqword.
9670 * @param iSegReg The index of the segment register to use for
9671 * this access. The base and limits are checked.
9672 * @param GCPtrMem The address of the guest memory.
9673 */
9674IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9675{
9676 /* The lazy approach for now... */
9677 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9678 if ( (GCPtrMem & 15)
9679 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9680 return iemRaiseGeneralProtectionFault0(pVCpu);
9681
9682 PCRTUINT128U pu128Src;
9683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9684 if (rc == VINF_SUCCESS)
9685 {
9686 pu128Dst->au64[0] = pu128Src->au64[0];
9687 pu128Dst->au64[1] = pu128Src->au64[1];
9688 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9689 }
9690 return rc;
9691}
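
/*
 * Alignment note for the check above: the 16-byte requirement is waived when
 * MXCSR.MM is set, i.e. AMD's misaligned SSE mode where 16-byte memory
 * operands no longer #GP(0) on unaligned addresses.  A hedged sketch of the
 * caller-visible behaviour (the variable names are made up for illustration):
 *
 * @code
 *      RTUINT128U   uXmmVal;
 *      VBOXSTRICTRC rcTmp = iemMemFetchDataU128AlignedSse(pVCpu, &uXmmVal, X86_SREG_DS, GCPtrMem);
 *      // (GCPtrMem & 15) != 0 and MXCSR.MM clear -> rcTmp is the #GP(0) status;
 *      // otherwise, on success, uXmmVal holds the 16 guest bytes.
 * @endcode
 */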
9692
9693
9694#ifdef IEM_WITH_SETJMP
9695/**
9696 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9697 * related, longjmp on error.
9698 *
9699 * Raises \#GP(0) if not aligned.
9700 *
9701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9702 * @param pu128Dst Where to return the dqword.
9703 * @param iSegReg The index of the segment register to use for
9704 * this access. The base and limits are checked.
9705 * @param GCPtrMem The address of the guest memory.
9706 */
9707DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9708{
9709 /* The lazy approach for now... */
9710 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9711 if ( (GCPtrMem & 15) == 0
9712 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9713 {
9714 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9715 pu128Dst->au64[0] = pu128Src->au64[0];
9716 pu128Dst->au64[1] = pu128Src->au64[1];
9717 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9718 return;
9719 }
9720
9721 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9722 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9723}
9724#endif
9725
9726
9727/**
9728 * Fetches a data oword (octo word), generally AVX related.
9729 *
9730 * @returns Strict VBox status code.
9731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9732 * @param pu256Dst Where to return the oword.
9733 * @param iSegReg The index of the segment register to use for
9734 * this access. The base and limits are checked.
9735 * @param GCPtrMem The address of the guest memory.
9736 */
9737IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9738{
9739 /* The lazy approach for now... */
9740 PCRTUINT256U pu256Src;
9741 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9742 if (rc == VINF_SUCCESS)
9743 {
9744 pu256Dst->au64[0] = pu256Src->au64[0];
9745 pu256Dst->au64[1] = pu256Src->au64[1];
9746 pu256Dst->au64[2] = pu256Src->au64[2];
9747 pu256Dst->au64[3] = pu256Src->au64[3];
9748 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9749 }
9750 return rc;
9751}
9752
9753
9754#ifdef IEM_WITH_SETJMP
9755/**
9756 * Fetches a data oword (octo word), generally AVX related.
9757 *
9758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9759 * @param pu256Dst Where to return the oword.
9760 * @param iSegReg The index of the segment register to use for
9761 * this access. The base and limits are checked.
9762 * @param GCPtrMem The address of the guest memory.
9763 */
9764IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9765{
9766 /* The lazy approach for now... */
9767 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9768 pu256Dst->au64[0] = pu256Src->au64[0];
9769 pu256Dst->au64[1] = pu256Src->au64[1];
9770 pu256Dst->au64[2] = pu256Src->au64[2];
9771 pu256Dst->au64[3] = pu256Src->au64[3];
9772 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9773}
9774#endif
9775
9776
9777/**
9778 * Fetches a data oword (octo word) at an aligned address, generally AVX
9779 * related.
9780 *
9781 * Raises \#GP(0) if not aligned.
9782 *
9783 * @returns Strict VBox status code.
9784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9785 * @param pu256Dst Where to return the oword.
9786 * @param iSegReg The index of the segment register to use for
9787 * this access. The base and limits are checked.
9788 * @param GCPtrMem The address of the guest memory.
9789 */
9790IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9791{
9792 /* The lazy approach for now... */
9793 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9794 if (GCPtrMem & 31)
9795 return iemRaiseGeneralProtectionFault0(pVCpu);
9796
9797 PCRTUINT256U pu256Src;
9798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9799 if (rc == VINF_SUCCESS)
9800 {
9801 pu256Dst->au64[0] = pu256Src->au64[0];
9802 pu256Dst->au64[1] = pu256Src->au64[1];
9803 pu256Dst->au64[2] = pu256Src->au64[2];
9804 pu256Dst->au64[3] = pu256Src->au64[3];
9805 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9806 }
9807 return rc;
9808}
9809
9810
9811#ifdef IEM_WITH_SETJMP
9812/**
9813 * Fetches a data oword (octo word) at an aligned address, generally AVX
9814 * related, longjmp on error.
9815 *
9816 * Raises \#GP(0) if not aligned.
9817 *
9818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9819 * @param pu256Dst Where to return the oword.
9820 * @param iSegReg The index of the segment register to use for
9821 * this access. The base and limits are checked.
9822 * @param GCPtrMem The address of the guest memory.
9823 */
9824DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9825{
9826 /* The lazy approach for now... */
9827 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9828 if ((GCPtrMem & 31) == 0)
9829 {
9830 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9831 pu256Dst->au64[0] = pu256Src->au64[0];
9832 pu256Dst->au64[1] = pu256Src->au64[1];
9833 pu256Dst->au64[2] = pu256Src->au64[2];
9834 pu256Dst->au64[3] = pu256Src->au64[3];
9835 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9836 return;
9837 }
9838
9839 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9840 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9841}
9842#endif
9843
9844
9845
9846/**
9847 * Fetches a descriptor register (lgdt, lidt).
9848 *
9849 * @returns Strict VBox status code.
9850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9851 * @param pcbLimit Where to return the limit.
9852 * @param pGCPtrBase Where to return the base.
9853 * @param iSegReg The index of the segment register to use for
9854 * this access. The base and limits are checked.
9855 * @param GCPtrMem The address of the guest memory.
9856 * @param enmOpSize The effective operand size.
9857 */
9858IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9859 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9860{
9861 /*
9862 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9863 * little special:
9864 * - The two reads are done separately.
9865 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9866 * - We suspect the 386 actually commits the limit before the base in
9867 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9868 * don't try to emulate this eccentric behavior, because it's not well
9869 * enough understood and rather hard to trigger.
9870 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9871 */
9872 VBOXSTRICTRC rcStrict;
9873 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9874 {
9875 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9876 if (rcStrict == VINF_SUCCESS)
9877 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9878 }
9879 else
9880 {
9881 uint32_t uTmp = 0; /* (Silences Visual C++'s 'maybe used uninitialized' warning.) */
9882 if (enmOpSize == IEMMODE_32BIT)
9883 {
9884 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9885 {
9886 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9887 if (rcStrict == VINF_SUCCESS)
9888 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9889 }
9890 else
9891 {
9892 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9893 if (rcStrict == VINF_SUCCESS)
9894 {
9895 *pcbLimit = (uint16_t)uTmp;
9896 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9897 }
9898 }
9899 if (rcStrict == VINF_SUCCESS)
9900 *pGCPtrBase = uTmp;
9901 }
9902 else
9903 {
9904 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9905 if (rcStrict == VINF_SUCCESS)
9906 {
9907 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9908 if (rcStrict == VINF_SUCCESS)
9909 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9910 }
9911 }
9912 }
9913 return rcStrict;
9914}
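
/*
 * Worked example for the 16-bit operand size branch above (the values are
 * illustrative): an LGDT from a 6-byte table { 0xff 0x3f  0x00 0x10 0x20 0xff }
 * reads the limit 0x3fff from the first two bytes and then a full dword
 * 0xff201000 from offset 2, but only the low 24 bits are kept, so the base
 * becomes 0x00201000.  With a 32-bit operand size the whole dword is used
 * (and on a 486 target the limit itself is read as a dword and truncated).
 */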
9915
9916
9917
9918/**
9919 * Stores a data byte.
9920 *
9921 * @returns Strict VBox status code.
9922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9923 * @param iSegReg The index of the segment register to use for
9924 * this access. The base and limits are checked.
9925 * @param GCPtrMem The address of the guest memory.
9926 * @param u8Value The value to store.
9927 */
9928IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9929{
9930 /* The lazy approach for now... */
9931 uint8_t *pu8Dst;
9932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9933 if (rc == VINF_SUCCESS)
9934 {
9935 *pu8Dst = u8Value;
9936 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9937 }
9938 return rc;
9939}
9940
9941
9942#ifdef IEM_WITH_SETJMP
9943/**
9944 * Stores a data byte, longjmp on error.
9945 *
9946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9947 * @param iSegReg The index of the segment register to use for
9948 * this access. The base and limits are checked.
9949 * @param GCPtrMem The address of the guest memory.
9950 * @param u8Value The value to store.
9951 */
9952IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9953{
9954 /* The lazy approach for now... */
9955 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9956 *pu8Dst = u8Value;
9957 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9958}
9959#endif
9960
9961
9962/**
9963 * Stores a data word.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param iSegReg The index of the segment register to use for
9968 * this access. The base and limits are checked.
9969 * @param GCPtrMem The address of the guest memory.
9970 * @param u16Value The value to store.
9971 */
9972IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9973{
9974 /* The lazy approach for now... */
9975 uint16_t *pu16Dst;
9976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9977 if (rc == VINF_SUCCESS)
9978 {
9979 *pu16Dst = u16Value;
9980 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9981 }
9982 return rc;
9983}
9984
9985
9986#ifdef IEM_WITH_SETJMP
9987/**
9988 * Stores a data word, longjmp on error.
9989 *
9990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9991 * @param iSegReg The index of the segment register to use for
9992 * this access. The base and limits are checked.
9993 * @param GCPtrMem The address of the guest memory.
9994 * @param u16Value The value to store.
9995 */
9996IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9997{
9998 /* The lazy approach for now... */
9999 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10000 *pu16Dst = u16Value;
10001 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10002}
10003#endif
10004
10005
10006/**
10007 * Stores a data dword.
10008 *
10009 * @returns Strict VBox status code.
10010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10011 * @param iSegReg The index of the segment register to use for
10012 * this access. The base and limits are checked.
10013 * @param GCPtrMem The address of the guest memory.
10014 * @param u32Value The value to store.
10015 */
10016IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10017{
10018 /* The lazy approach for now... */
10019 uint32_t *pu32Dst;
10020 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10021 if (rc == VINF_SUCCESS)
10022 {
10023 *pu32Dst = u32Value;
10024 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10025 }
10026 return rc;
10027}
10028
10029
10030#ifdef IEM_WITH_SETJMP
10031/**
10032 * Stores a data dword, longjmp on error.
10033 *
10035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10036 * @param iSegReg The index of the segment register to use for
10037 * this access. The base and limits are checked.
10038 * @param GCPtrMem The address of the guest memory.
10039 * @param u32Value The value to store.
10040 */
10041IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10042{
10043 /* The lazy approach for now... */
10044 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10045 *pu32Dst = u32Value;
10046 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10047}
10048#endif
10049
10050
10051/**
10052 * Stores a data qword.
10053 *
10054 * @returns Strict VBox status code.
10055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10056 * @param iSegReg The index of the segment register to use for
10057 * this access. The base and limits are checked.
10058 * @param GCPtrMem The address of the guest memory.
10059 * @param u64Value The value to store.
10060 */
10061IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10062{
10063 /* The lazy approach for now... */
10064 uint64_t *pu64Dst;
10065 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10066 if (rc == VINF_SUCCESS)
10067 {
10068 *pu64Dst = u64Value;
10069 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10070 }
10071 return rc;
10072}
10073
10074
10075#ifdef IEM_WITH_SETJMP
10076/**
10077 * Stores a data qword, longjmp on error.
10078 *
10079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10080 * @param iSegReg The index of the segment register to use for
10081 * this access. The base and limits are checked.
10082 * @param GCPtrMem The address of the guest memory.
10083 * @param u64Value The value to store.
10084 */
10085IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10086{
10087 /* The lazy approach for now... */
10088 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10089 *pu64Dst = u64Value;
10090 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10091}
10092#endif
10093
10094
10095/**
10096 * Stores a data dqword.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10100 * @param iSegReg The index of the segment register to use for
10101 * this access. The base and limits are checked.
10102 * @param GCPtrMem The address of the guest memory.
10103 * @param u128Value The value to store.
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10106{
10107 /* The lazy approach for now... */
10108 PRTUINT128U pu128Dst;
10109 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10110 if (rc == VINF_SUCCESS)
10111 {
10112 pu128Dst->au64[0] = u128Value.au64[0];
10113 pu128Dst->au64[1] = u128Value.au64[1];
10114 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10115 }
10116 return rc;
10117}
10118
10119
10120#ifdef IEM_WITH_SETJMP
10121/**
10122 * Stores a data dqword, longjmp on error.
10123 *
10124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10125 * @param iSegReg The index of the segment register to use for
10126 * this access. The base and limits are checked.
10127 * @param GCPtrMem The address of the guest memory.
10128 * @param u128Value The value to store.
10129 */
10130IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10131{
10132 /* The lazy approach for now... */
10133 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10134 pu128Dst->au64[0] = u128Value.au64[0];
10135 pu128Dst->au64[1] = u128Value.au64[1];
10136 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10137}
10138#endif
10139
10140
10141/**
10142 * Stores a data dqword, SSE aligned.
10143 *
10144 * @returns Strict VBox status code.
10145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10146 * @param iSegReg The index of the segment register to use for
10147 * this access. The base and limits are checked.
10148 * @param GCPtrMem The address of the guest memory.
10149 * @param u128Value The value to store.
10150 */
10151IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10152{
10153 /* The lazy approach for now... */
10154 if ( (GCPtrMem & 15)
10155 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10156 return iemRaiseGeneralProtectionFault0(pVCpu);
10157
10158 PRTUINT128U pu128Dst;
10159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10160 if (rc == VINF_SUCCESS)
10161 {
10162 pu128Dst->au64[0] = u128Value.au64[0];
10163 pu128Dst->au64[1] = u128Value.au64[1];
10164 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10165 }
10166 return rc;
10167}
10168
10169
10170#ifdef IEM_WITH_SETJMP
10171/**
10172 * Stores a data dqword, SSE aligned, longjmp on error.
10173 *
10175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10176 * @param iSegReg The index of the segment register to use for
10177 * this access. The base and limits are checked.
10178 * @param GCPtrMem The address of the guest memory.
10179 * @param u128Value The value to store.
10180 */
10181DECL_NO_INLINE(IEM_STATIC, void)
10182iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10183{
10184 /* The lazy approach for now... */
10185 if ( (GCPtrMem & 15) == 0
10186 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10187 {
10188 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10189 pu128Dst->au64[0] = u128Value.au64[0];
10190 pu128Dst->au64[1] = u128Value.au64[1];
10191 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10192 return;
10193 }
10194
10195 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10196 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10197}
10198#endif
10199
10200
10201/**
10202 * Stores a data oword (octo word).
10203 *
10204 * @returns Strict VBox status code.
10205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10206 * @param iSegReg The index of the segment register to use for
10207 * this access. The base and limits are checked.
10208 * @param GCPtrMem The address of the guest memory.
10209 * @param pu256Value Pointer to the value to store.
10210 */
10211IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10212{
10213 /* The lazy approach for now... */
10214 PRTUINT256U pu256Dst;
10215 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10216 if (rc == VINF_SUCCESS)
10217 {
10218 pu256Dst->au64[0] = pu256Value->au64[0];
10219 pu256Dst->au64[1] = pu256Value->au64[1];
10220 pu256Dst->au64[2] = pu256Value->au64[2];
10221 pu256Dst->au64[3] = pu256Value->au64[3];
10222 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10223 }
10224 return rc;
10225}
10226
10227
10228#ifdef IEM_WITH_SETJMP
10229/**
10230 * Stores a data oword (octo word), longjmp on error.
10231 *
10232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10233 * @param iSegReg The index of the segment register to use for
10234 * this access. The base and limits are checked.
10235 * @param GCPtrMem The address of the guest memory.
10236 * @param pu256Value Pointer to the value to store.
10237 */
10238IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10239{
10240 /* The lazy approach for now... */
10241 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10242 pu256Dst->au64[0] = pu256Value->au64[0];
10243 pu256Dst->au64[1] = pu256Value->au64[1];
10244 pu256Dst->au64[2] = pu256Value->au64[2];
10245 pu256Dst->au64[3] = pu256Value->au64[3];
10246 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10247}
10248#endif
10249
10250
10251/**
10252 * Stores a data oword (octo word), AVX aligned.
10253 *
10254 * @returns Strict VBox status code.
10255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10256 * @param iSegReg The index of the segment register to use for
10257 * this access. The base and limits are checked.
10258 * @param GCPtrMem The address of the guest memory.
10259 * @param pu256Value Pointer to the value to store.
10260 */
10261IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10262{
10263 /* The lazy approach for now... */
10264 if (GCPtrMem & 31)
10265 return iemRaiseGeneralProtectionFault0(pVCpu);
10266
10267 PRTUINT256U pu256Dst;
10268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10269 if (rc == VINF_SUCCESS)
10270 {
10271 pu256Dst->au64[0] = pu256Value->au64[0];
10272 pu256Dst->au64[1] = pu256Value->au64[1];
10273 pu256Dst->au64[2] = pu256Value->au64[2];
10274 pu256Dst->au64[3] = pu256Value->au64[3];
10275 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10276 }
10277 return rc;
10278}
10279
10280
10281#ifdef IEM_WITH_SETJMP
10282/**
10283 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10284 *
10286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10287 * @param iSegReg The index of the segment register to use for
10288 * this access. The base and limits are checked.
10289 * @param GCPtrMem The address of the guest memory.
10290 * @param pu256Value Pointer to the value to store.
10291 */
10292DECL_NO_INLINE(IEM_STATIC, void)
10293iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10294{
10295 /* The lazy approach for now... */
10296 if ((GCPtrMem & 31) == 0)
10297 {
10298 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10299 pu256Dst->au64[0] = pu256Value->au64[0];
10300 pu256Dst->au64[1] = pu256Value->au64[1];
10301 pu256Dst->au64[2] = pu256Value->au64[2];
10302 pu256Dst->au64[3] = pu256Value->au64[3];
10303 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10304 return;
10305 }
10306
10307 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10308 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10309}
10310#endif
10311
10312
10313/**
10314 * Stores a descriptor register (sgdt, sidt).
10315 *
10316 * @returns Strict VBox status code.
10317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10318 * @param cbLimit The limit.
10319 * @param GCPtrBase The base address.
10320 * @param iSegReg The index of the segment register to use for
10321 * this access. The base and limits are checked.
10322 * @param GCPtrMem The address of the guest memory.
10323 */
10324IEM_STATIC VBOXSTRICTRC
10325iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10326{
10327 /*
10328 * The SIDT and SGDT instructions actually store the data using two
10329 * independent writes. The instructions do not respond to operand size prefixes.
10330 */
10331 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10332 if (rcStrict == VINF_SUCCESS)
10333 {
10334 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10335 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10336 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10337 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10338 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10339 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10340 else
10341 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10342 }
10343 return rcStrict;
10344}
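
/*
 * Worked example for the 16-bit operand size branch above (illustrative
 * values): SGDT with a base of 0x00201000 and a limit of 0x3fff stores the
 * limit word first and then a dword; on a 286-class target CPU the top byte
 * of that dword is forced to 0xff (so 0xff201000 ends up in memory), while
 * later CPUs store 0x00201000 as-is.  In 32-bit and 64-bit code the base is
 * stored untouched as a dword or qword respectively.
 */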
10345
10346
10347/**
10348 * Pushes a word onto the stack.
10349 *
10350 * @returns Strict VBox status code.
10351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10352 * @param u16Value The value to push.
10353 */
10354IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10355{
10356 /* Decrement the stack pointer. */
10357 uint64_t uNewRsp;
10358 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10359
10360 /* Write the word the lazy way. */
10361 uint16_t *pu16Dst;
10362 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10363 if (rc == VINF_SUCCESS)
10364 {
10365 *pu16Dst = u16Value;
10366 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10367 }
10368
10369 /* Commit the new RSP value unless an access handler made trouble. */
10370 if (rc == VINF_SUCCESS)
10371 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10372
10373 return rc;
10374}
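
/*
 * A minimal usage sketch of the push helpers above (illustrative only; the
 * pushed value and the error handling style are assumptions, not copied from
 * a real caller):
 *
 * @code
 *      uint16_t const u16Value = 0x1234;   // arbitrary example value
 *      VBOXSTRICTRC   rcTmp    = iemMemStackPushU16(pVCpu, u16Value);
 *      if (rcTmp != VINF_SUCCESS)
 *          return rcTmp;                   // RSP is only updated after the write committed
 * @endcode
 */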
10375
10376
10377/**
10378 * Pushes a dword onto the stack.
10379 *
10380 * @returns Strict VBox status code.
10381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10382 * @param u32Value The value to push.
10383 */
10384IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10385{
10386 /* Decrement the stack pointer. */
10387 uint64_t uNewRsp;
10388 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10389
10390 /* Write the dword the lazy way. */
10391 uint32_t *pu32Dst;
10392 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10393 if (rc == VINF_SUCCESS)
10394 {
10395 *pu32Dst = u32Value;
10396 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10397 }
10398
10399 /* Commit the new RSP value unless an access handler made trouble. */
10400 if (rc == VINF_SUCCESS)
10401 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10402
10403 return rc;
10404}
10405
10406
10407/**
10408 * Pushes a dword segment register value onto the stack.
10409 *
10410 * @returns Strict VBox status code.
10411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10412 * @param u32Value The value to push.
10413 */
10414IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10415{
10416 /* Decrement the stack pointer. */
10417 uint64_t uNewRsp;
10418 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10419
10420 /* The Intel docs talk about zero extending the selector register
10421 value. My actual Intel CPU here might be zero extending the value,
10422 but it still only writes the lower word... */
10423 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10424 * happens when crossing an electric page boundary, is the high word checked
10425 * for write accessibility or not? Probably it is. What about segment limits?
10426 * It appears this behavior is also shared with trap error codes.
10427 *
10428 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro. Check
10429 * on ancient hardware to see when it actually did change. */
10430 uint16_t *pu16Dst;
10431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10432 if (rc == VINF_SUCCESS)
10433 {
10434 *pu16Dst = (uint16_t)u32Value;
10435 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10436 }
10437
10438 /* Commit the new RSP value unless an access handler made trouble. */
10439 if (rc == VINF_SUCCESS)
10440 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10441
10442 return rc;
10443}
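
/*
 * Illustration of the behaviour implemented above (assumed values): a 32-bit
 * "push ds" with DS=0x0023 and ESP=0x1000 moves ESP down to 0x0ffc but only
 * the word 0x0023 is actually written at [0x0ffc]; the bytes at
 * [0x0ffe..0x0fff] are left untouched (the mapping is read-write, so their
 * original contents are preserved when the mapping is committed).
 */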
10444
10445
10446/**
10447 * Pushes a qword onto the stack.
10448 *
10449 * @returns Strict VBox status code.
10450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10451 * @param u64Value The value to push.
10452 */
10453IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10454{
10455 /* Decrement the stack pointer. */
10456 uint64_t uNewRsp;
10457 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10458
10459 /* Write the qword the lazy way. */
10460 uint64_t *pu64Dst;
10461 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10462 if (rc == VINF_SUCCESS)
10463 {
10464 *pu64Dst = u64Value;
10465 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10466 }
10467
10468 /* Commit the new RSP value unless we an access handler made trouble. */
10469 /* Commit the new RSP value unless an access handler made trouble. */
10470 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10471
10472 return rc;
10473}
10474
10475
10476/**
10477 * Pops a word from the stack.
10478 *
10479 * @returns Strict VBox status code.
10480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10481 * @param pu16Value Where to store the popped value.
10482 */
10483IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10484{
10485 /* Increment the stack pointer. */
10486 uint64_t uNewRsp;
10487 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10488
10489 /* Read the word the lazy way. */
10490 uint16_t const *pu16Src;
10491 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10492 if (rc == VINF_SUCCESS)
10493 {
10494 *pu16Value = *pu16Src;
10495 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10496
10497 /* Commit the new RSP value. */
10498 if (rc == VINF_SUCCESS)
10499 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10500 }
10501
10502 return rc;
10503}
10504
10505
10506/**
10507 * Pops a dword from the stack.
10508 *
10509 * @returns Strict VBox status code.
10510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10511 * @param pu32Value Where to store the popped value.
10512 */
10513IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10514{
10515 /* Increment the stack pointer. */
10516 uint64_t uNewRsp;
10517 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10518
10519 /* Read the dword the lazy way. */
10520 uint32_t const *pu32Src;
10521 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10522 if (rc == VINF_SUCCESS)
10523 {
10524 *pu32Value = *pu32Src;
10525 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10526
10527 /* Commit the new RSP value. */
10528 if (rc == VINF_SUCCESS)
10529 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10530 }
10531
10532 return rc;
10533}
10534
10535
10536/**
10537 * Pops a qword from the stack.
10538 *
10539 * @returns Strict VBox status code.
10540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10541 * @param pu64Value Where to store the popped value.
10542 */
10543IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10544{
10545 /* Increment the stack pointer. */
10546 uint64_t uNewRsp;
10547 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10548
10549 /* Read the qword the lazy way. */
10550 uint64_t const *pu64Src;
10551 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10552 if (rc == VINF_SUCCESS)
10553 {
10554 *pu64Value = *pu64Src;
10555 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10556
10557 /* Commit the new RSP value. */
10558 if (rc == VINF_SUCCESS)
10559 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10560 }
10561
10562 return rc;
10563}
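
/*
 * A minimal usage sketch of the pop helpers above (illustrative; the variable
 * names are arbitrary):
 *
 * @code
 *      uint64_t     u64Tmp = 0;
 *      VBOXSTRICTRC rcTmp  = iemMemStackPopU64(pVCpu, &u64Tmp);
 *      if (rcTmp != VINF_SUCCESS)
 *          return rcTmp;   // on failure RSP has not been advanced
 *      // u64Tmp holds the popped value and RSP now points past it.
 * @endcode
 */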
10564
10565
10566/**
10567 * Pushes a word onto the stack, using a temporary stack pointer.
10568 *
10569 * @returns Strict VBox status code.
10570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10571 * @param u16Value The value to push.
10572 * @param pTmpRsp Pointer to the temporary stack pointer.
10573 */
10574IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10575{
10576 /* Decrement the stack pointer. */
10577 RTUINT64U NewRsp = *pTmpRsp;
10578 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10579
10580 /* Write the word the lazy way. */
10581 uint16_t *pu16Dst;
10582 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10583 if (rc == VINF_SUCCESS)
10584 {
10585 *pu16Dst = u16Value;
10586 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10587 }
10588
10589 /* Commit the new RSP value unless an access handler made trouble. */
10590 if (rc == VINF_SUCCESS)
10591 *pTmpRsp = NewRsp;
10592
10593 return rc;
10594}
10595
10596
10597/**
10598 * Pushes a dword onto the stack, using a temporary stack pointer.
10599 *
10600 * @returns Strict VBox status code.
10601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10602 * @param u32Value The value to push.
10603 * @param pTmpRsp Pointer to the temporary stack pointer.
10604 */
10605IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10606{
10607 /* Decrement the stack pointer. */
10608 RTUINT64U NewRsp = *pTmpRsp;
10609 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10610
10611 /* Write the dword the lazy way. */
10612 uint32_t *pu32Dst;
10613 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10614 if (rc == VINF_SUCCESS)
10615 {
10616 *pu32Dst = u32Value;
10617 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10618 }
10619
10620 /* Commit the new RSP value unless an access handler made trouble. */
10621 if (rc == VINF_SUCCESS)
10622 *pTmpRsp = NewRsp;
10623
10624 return rc;
10625}
10626
10627
10628/**
10629 * Pushes a qword onto the stack, using a temporary stack pointer.
10630 *
10631 * @returns Strict VBox status code.
10632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10633 * @param u64Value The value to push.
10634 * @param pTmpRsp Pointer to the temporary stack pointer.
10635 */
10636IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10637{
10638 /* Decrement the stack pointer. */
10639 RTUINT64U NewRsp = *pTmpRsp;
10640 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10641
10642 /* Write the qword the lazy way. */
10643 uint64_t *pu64Dst;
10644 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10645 if (rc == VINF_SUCCESS)
10646 {
10647 *pu64Dst = u64Value;
10648 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10649 }
10650
10651 /* Commit the new RSP value unless an access handler made trouble. */
10652 if (rc == VINF_SUCCESS)
10653 *pTmpRsp = NewRsp;
10654
10655 return rc;
10656}
10657
10658
10659/**
10660 * Pops a word from the stack, using a temporary stack pointer.
10661 *
10662 * @returns Strict VBox status code.
10663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10664 * @param pu16Value Where to store the popped value.
10665 * @param pTmpRsp Pointer to the temporary stack pointer.
10666 */
10667IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10668{
10669 /* Increment the stack pointer. */
10670 RTUINT64U NewRsp = *pTmpRsp;
10671 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10672
10673 /* Read the word the lazy way. */
10674 uint16_t const *pu16Src;
10675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10676 if (rc == VINF_SUCCESS)
10677 {
10678 *pu16Value = *pu16Src;
10679 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10680
10681 /* Commit the new RSP value. */
10682 if (rc == VINF_SUCCESS)
10683 *pTmpRsp = NewRsp;
10684 }
10685
10686 return rc;
10687}
10688
10689
10690/**
10691 * Pops a dword from the stack, using a temporary stack pointer.
10692 *
10693 * @returns Strict VBox status code.
10694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10695 * @param pu32Value Where to store the popped value.
10696 * @param pTmpRsp Pointer to the temporary stack pointer.
10697 */
10698IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10699{
10700 /* Increment the stack pointer. */
10701 RTUINT64U NewRsp = *pTmpRsp;
10702 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10703
10704 /* Read the dword the lazy way. */
10705 uint32_t const *pu32Src;
10706 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10707 if (rc == VINF_SUCCESS)
10708 {
10709 *pu32Value = *pu32Src;
10710 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10711
10712 /* Commit the new RSP value. */
10713 if (rc == VINF_SUCCESS)
10714 *pTmpRsp = NewRsp;
10715 }
10716
10717 return rc;
10718}
10719
10720
10721/**
10722 * Pops a qword from the stack, using a temporary stack pointer.
10723 *
10724 * @returns Strict VBox status code.
10725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10726 * @param pu64Value Where to store the popped value.
10727 * @param pTmpRsp Pointer to the temporary stack pointer.
10728 */
10729IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10730{
10731 /* Increment the stack pointer. */
10732 RTUINT64U NewRsp = *pTmpRsp;
10733 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10734
10735 /* Read the qword the lazy way. */
10736 uint64_t const *pu64Src;
10737 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10738 if (rcStrict == VINF_SUCCESS)
10739 {
10740 *pu64Value = *pu64Src;
10741 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10742
10743 /* Commit the new RSP value. */
10744 if (rcStrict == VINF_SUCCESS)
10745 *pTmpRsp = NewRsp;
10746 }
10747
10748 return rcStrict;
10749}
10750
10751
10752/**
10753  * Begin a special stack push (used by interrupts, exceptions and such).
10754 *
10755 * This will raise \#SS or \#PF if appropriate.
10756 *
10757 * @returns Strict VBox status code.
10758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10759 * @param cbMem The number of bytes to push onto the stack.
10760 * @param ppvMem Where to return the pointer to the stack memory.
10761 * As with the other memory functions this could be
10762 * direct access or bounce buffered access, so
10763  *                              don't commit any register values until the
10764  *                              commit call succeeds.
10765 * @param puNewRsp Where to return the new RSP value. This must be
10766 * passed unchanged to
10767 * iemMemStackPushCommitSpecial().
10768 */
10769IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10770{
10771 Assert(cbMem < UINT8_MAX);
10772 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10773 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10774}
10775
10776
10777/**
10778 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10779 *
10780 * This will update the rSP.
10781 *
10782 * @returns Strict VBox status code.
10783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10784 * @param pvMem The pointer returned by
10785 * iemMemStackPushBeginSpecial().
10786 * @param uNewRsp The new RSP value returned by
10787 * iemMemStackPushBeginSpecial().
10788 */
10789IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10790{
10791 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10792 if (rcStrict == VINF_SUCCESS)
10793 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10794 return rcStrict;
10795}
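
/*
 * A minimal usage sketch of the special push API above (illustrative only;
 * uValueToPush is a placeholder local and the 8-byte frame size is arbitrary).
 * The point is that neither guest memory nor RSP is committed unless both the
 * begin and the commit calls succeed:
 *
 *     uint64_t     uNewRsp;
 *     void        *pvStackFrame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t), &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     *(uint64_t *)pvStackFrame = uValueToPush;                                // write via the mapping (may be bounce buffered)
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);   // unmaps and commits RSP on success
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */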
10796
10797
10798/**
10799 * Begin a special stack pop (used by iret, retf and such).
10800 *
10801 * This will raise \#SS or \#PF if appropriate.
10802 *
10803 * @returns Strict VBox status code.
10804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10805 * @param cbMem The number of bytes to pop from the stack.
10806 * @param ppvMem Where to return the pointer to the stack memory.
10807 * @param puNewRsp Where to return the new RSP value. This must be
10808 * assigned to CPUMCTX::rsp manually some time
10809 * after iemMemStackPopDoneSpecial() has been
10810 * called.
10811 */
10812IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10813{
10814 Assert(cbMem < UINT8_MAX);
10815 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10816 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10817}
10818
10819
10820/**
10821 * Continue a special stack pop (used by iret and retf).
10822 *
10823 * This will raise \#SS or \#PF if appropriate.
10824 *
10825 * @returns Strict VBox status code.
10826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10827 * @param cbMem The number of bytes to pop from the stack.
10828 * @param ppvMem Where to return the pointer to the stack memory.
10829 * @param puNewRsp Where to return the new RSP value. This must be
10830 * assigned to CPUMCTX::rsp manually some time
10831 * after iemMemStackPopDoneSpecial() has been
10832 * called.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10835{
10836 Assert(cbMem < UINT8_MAX);
10837 RTUINT64U NewRsp;
10838 NewRsp.u = *puNewRsp;
10839 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10840 *puNewRsp = NewRsp.u;
10841 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10842}
10843
10844
10845/**
10846 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10847 * iemMemStackPopContinueSpecial).
10848 *
10849 * The caller will manually commit the rSP.
10850 *
10851 * @returns Strict VBox status code.
10852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10853 * @param pvMem The pointer returned by
10854 * iemMemStackPopBeginSpecial() or
10855 * iemMemStackPopContinueSpecial().
10856 */
10857IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10858{
10859 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10860}
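
/*
 * A minimal usage sketch of the special pop API above (illustrative only; the
 * frame layout is a placeholder).  Note that RSP is committed manually by the
 * caller, not by iemMemStackPopDoneSpecial():
 *
 *     uint64_t        uNewRsp;
 *     uint64_t const *pu64Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), (void const **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint64_t const uPoppedValue = *pu64Frame;                 // read while still mapped
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp;                         // committed by the caller, possibly later
 */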
10861
10862
10863/**
10864 * Fetches a system table byte.
10865 *
10866 * @returns Strict VBox status code.
10867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10868 * @param pbDst Where to return the byte.
10869 * @param iSegReg The index of the segment register to use for
10870 * this access. The base and limits are checked.
10871 * @param GCPtrMem The address of the guest memory.
10872 */
10873IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10874{
10875 /* The lazy approach for now... */
10876 uint8_t const *pbSrc;
10877 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10878 if (rc == VINF_SUCCESS)
10879 {
10880 *pbDst = *pbSrc;
10881 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10882 }
10883 return rc;
10884}
10885
10886
10887/**
10888 * Fetches a system table word.
10889 *
10890 * @returns Strict VBox status code.
10891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10892 * @param pu16Dst Where to return the word.
10893 * @param iSegReg The index of the segment register to use for
10894 * this access. The base and limits are checked.
10895 * @param GCPtrMem The address of the guest memory.
10896 */
10897IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10898{
10899 /* The lazy approach for now... */
10900 uint16_t const *pu16Src;
10901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10902 if (rc == VINF_SUCCESS)
10903 {
10904 *pu16Dst = *pu16Src;
10905 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10906 }
10907 return rc;
10908}
10909
10910
10911/**
10912 * Fetches a system table dword.
10913 *
10914 * @returns Strict VBox status code.
10915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10916 * @param pu32Dst Where to return the dword.
10917 * @param iSegReg The index of the segment register to use for
10918 * this access. The base and limits are checked.
10919 * @param GCPtrMem The address of the guest memory.
10920 */
10921IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10922{
10923 /* The lazy approach for now... */
10924 uint32_t const *pu32Src;
10925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10926 if (rc == VINF_SUCCESS)
10927 {
10928 *pu32Dst = *pu32Src;
10929 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10930 }
10931 return rc;
10932}
10933
10934
10935/**
10936 * Fetches a system table qword.
10937 *
10938 * @returns Strict VBox status code.
10939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10940 * @param pu64Dst Where to return the qword.
10941 * @param iSegReg The index of the segment register to use for
10942 * this access. The base and limits are checked.
10943 * @param GCPtrMem The address of the guest memory.
10944 */
10945IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10946{
10947 /* The lazy approach for now... */
10948 uint64_t const *pu64Src;
10949 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10950 if (rc == VINF_SUCCESS)
10951 {
10952 *pu64Dst = *pu64Src;
10953 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10954 }
10955 return rc;
10956}
10957
10958
10959/**
10960 * Fetches a descriptor table entry with caller specified error code.
10961 *
10962 * @returns Strict VBox status code.
10963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10964 * @param pDesc Where to return the descriptor table entry.
10965 * @param uSel The selector which table entry to fetch.
10966 * @param uXcpt The exception to raise on table lookup error.
10967 * @param uErrorCode The error code associated with the exception.
10968 */
10969IEM_STATIC VBOXSTRICTRC
10970iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10971{
10972 AssertPtr(pDesc);
10973 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10974
10975 /** @todo did the 286 require all 8 bytes to be accessible? */
10976 /*
10977 * Get the selector table base and check bounds.
10978 */
10979 RTGCPTR GCPtrBase;
10980 if (uSel & X86_SEL_LDT)
10981 {
10982 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10983 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10984 {
10985 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10986 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10987 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10988 uErrorCode, 0);
10989 }
10990
10991 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10992 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10993 }
10994 else
10995 {
10996 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10997 {
10998 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10999 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11000 uErrorCode, 0);
11001 }
11002 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11003 }
11004
11005 /*
11006 * Read the legacy descriptor and maybe the long mode extensions if
11007 * required.
11008 */
11009 VBOXSTRICTRC rcStrict;
11010 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11011 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11012 else
11013 {
11014 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11015 if (rcStrict == VINF_SUCCESS)
11016 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11017 if (rcStrict == VINF_SUCCESS)
11018 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11019 if (rcStrict == VINF_SUCCESS)
11020 pDesc->Legacy.au16[3] = 0;
11021 else
11022 return rcStrict;
11023 }
11024
11025 if (rcStrict == VINF_SUCCESS)
11026 {
11027 if ( !IEM_IS_LONG_MODE(pVCpu)
11028 || pDesc->Legacy.Gen.u1DescType)
11029 pDesc->Long.au64[1] = 0;
11030 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11031 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11032 else
11033 {
11034 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11035 /** @todo is this the right exception? */
11036 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11037 }
11038 }
11039 return rcStrict;
11040}
11041
11042
11043/**
11044 * Fetches a descriptor table entry.
11045 *
11046 * @returns Strict VBox status code.
11047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11048 * @param pDesc Where to return the descriptor table entry.
11049 * @param uSel The selector which table entry to fetch.
11050 * @param uXcpt The exception to raise on table lookup error.
11051 */
11052IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11053{
11054 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11055}
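
/*
 * A minimal usage sketch (illustrative only; uSel is a placeholder selector
 * and the follow-up check merely shows a typical pattern, not the definitive
 * sequence used by any particular instruction):
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (!Desc.Legacy.Gen.u1Present)
 *         return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
 */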
11056
11057
11058/**
11059 * Fakes a long mode stack selector for SS = 0.
11060 *
11061 * @param pDescSs Where to return the fake stack descriptor.
11062 * @param uDpl The DPL we want.
11063 */
11064IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11065{
11066 pDescSs->Long.au64[0] = 0;
11067 pDescSs->Long.au64[1] = 0;
11068 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11069 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11070 pDescSs->Long.Gen.u2Dpl = uDpl;
11071 pDescSs->Long.Gen.u1Present = 1;
11072 pDescSs->Long.Gen.u1Long = 1;
11073}
11074
11075
11076/**
11077 * Marks the selector descriptor as accessed (only non-system descriptors).
11078 *
11079  * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11080 * will therefore skip the limit checks.
11081 *
11082 * @returns Strict VBox status code.
11083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11084 * @param uSel The selector.
11085 */
11086IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11087{
11088 /*
11089 * Get the selector table base and calculate the entry address.
11090 */
11091 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11092 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11093 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11094 GCPtr += uSel & X86_SEL_MASK;
11095
11096 /*
11097 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11098 * ugly stuff to avoid this. This will make sure it's an atomic access
11099  * as well as more or less remove any question about 8-bit or 32-bit accesses.
11100 */
11101 VBOXSTRICTRC rcStrict;
11102 uint32_t volatile *pu32;
11103 if ((GCPtr & 3) == 0)
11104 {
11105         /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11106 GCPtr += 2 + 2;
11107 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11108 if (rcStrict != VINF_SUCCESS)
11109 return rcStrict;
11110         ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11111 }
11112 else
11113 {
11114 /* The misaligned GDT/LDT case, map the whole thing. */
11115 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11116 if (rcStrict != VINF_SUCCESS)
11117 return rcStrict;
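        /* The accessed bit is bit 40 of the 8-byte descriptor.  When the host
           pointer returned by iemMemMap is misaligned, advance a byte pointer to
           the next 4-byte boundary and subtract the skipped bits from the bit
           index (e.g. for a misalignment of 1 we skip 3 bytes, 40 - 24 = bit 16). */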
11118 switch ((uintptr_t)pu32 & 3)
11119 {
11120 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11121 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11122 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11123 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11124 }
11125 }
11126
11127 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11128}
11129
11130/** @} */
11131
11132
11133/*
11134 * Include the C/C++ implementation of instruction.
11135 */
11136#include "IEMAllCImpl.cpp.h"
11137
11138
11139
11140/** @name "Microcode" macros.
11141 *
11142  * The idea is that we should be able to use the same code to interpret
11143  * instructions as well as to recompile them. Thus this obfuscation.
11144 *
11145 * @{
11146 */
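/*
 * A hedged sketch of how these macros compose into an instruction body
 * (illustrative only; iGRegSrc and iGRegDst stand for register indices
 * decoded from the ModR/M byte and are not real locals here):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *     IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 *
 * In the interpreter the block expands to plain C statements; a recompiler
 * could give the very same macros a different expansion.
 */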
11147#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11148#define IEM_MC_END() }
11149#define IEM_MC_PAUSE() do {} while (0)
11150#define IEM_MC_CONTINUE() do {} while (0)
11151
11152/** Internal macro. */
11153#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11154 do \
11155 { \
11156 VBOXSTRICTRC rcStrict2 = a_Expr; \
11157 if (rcStrict2 != VINF_SUCCESS) \
11158 return rcStrict2; \
11159 } while (0)
11160
11161
11162#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11163#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11164#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11165#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11166#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11167#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11168#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11169#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11170#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11171 do { \
11172 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11173 return iemRaiseDeviceNotAvailable(pVCpu); \
11174 } while (0)
11175#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11176 do { \
11177 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11178 return iemRaiseDeviceNotAvailable(pVCpu); \
11179 } while (0)
11180#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11181 do { \
11182 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11183 return iemRaiseMathFault(pVCpu); \
11184 } while (0)
11185#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11186 do { \
11187 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11188 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11189 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11190 return iemRaiseUndefinedOpcode(pVCpu); \
11191 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11192 return iemRaiseDeviceNotAvailable(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11195 do { \
11196 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11197 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11198 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11199 return iemRaiseUndefinedOpcode(pVCpu); \
11200 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11201 return iemRaiseDeviceNotAvailable(pVCpu); \
11202 } while (0)
11203#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11204 do { \
11205 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11206 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11207 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11208 return iemRaiseUndefinedOpcode(pVCpu); \
11209 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11210 return iemRaiseDeviceNotAvailable(pVCpu); \
11211 } while (0)
11212#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11213 do { \
11214 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11215 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11216 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11217 return iemRaiseUndefinedOpcode(pVCpu); \
11218 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11219 return iemRaiseDeviceNotAvailable(pVCpu); \
11220 } while (0)
11221#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11222 do { \
11223 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11224 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11225 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11226 return iemRaiseUndefinedOpcode(pVCpu); \
11227 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11228 return iemRaiseDeviceNotAvailable(pVCpu); \
11229 } while (0)
11230#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11231 do { \
11232 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11233 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11234 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11235 return iemRaiseUndefinedOpcode(pVCpu); \
11236 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11237 return iemRaiseDeviceNotAvailable(pVCpu); \
11238 } while (0)
11239#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11240 do { \
11241 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11242 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11243 return iemRaiseUndefinedOpcode(pVCpu); \
11244 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11245 return iemRaiseDeviceNotAvailable(pVCpu); \
11246 } while (0)
11247#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11248 do { \
11249 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11250 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11251 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11252 return iemRaiseUndefinedOpcode(pVCpu); \
11253 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11254 return iemRaiseDeviceNotAvailable(pVCpu); \
11255 } while (0)
11256#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11257 do { \
11258 if (pVCpu->iem.s.uCpl != 0) \
11259 return iemRaiseGeneralProtectionFault0(pVCpu); \
11260 } while (0)
11261#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11262 do { \
11263 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11264 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11265 } while (0)
11266#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11267 do { \
11268 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11269 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11270 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11271 return iemRaiseUndefinedOpcode(pVCpu); \
11272 } while (0)
11273#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11274 do { \
11275 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11276 return iemRaiseGeneralProtectionFault0(pVCpu); \
11277 } while (0)
11278
11279
11280#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11281#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11282#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11283#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11284#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11285#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11286#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11287 uint32_t a_Name; \
11288 uint32_t *a_pName = &a_Name
11289#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11290 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11291
11292#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11293#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11294
11295#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11296#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11297#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11298#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11312#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11313 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11314 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11315 } while (0)
11316#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11317 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11318 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11319 } while (0)
11320#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11321 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11322 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11323 } while (0)
11324/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11325#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11326 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11327 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11328 } while (0)
11329#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11330 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11331 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11332 } while (0)
11333/** @note Not for IOPL or IF testing or modification. */
11334#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11335#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11336#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11337#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11338
11339#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11340#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11341#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11342#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11343#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11344#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11345#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11346#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11347#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11348#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11349/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11350#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11351 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11352 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11353 } while (0)
11354#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11355 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11356 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11357 } while (0)
11358#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11359 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11360
11361
11362#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11363#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11364/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11365 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11366#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11367#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11368/** @note Not for IOPL or IF testing or modification. */
11369#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11370
11371#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11372#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11373#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11374 do { \
11375 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11376 *pu32Reg += (a_u32Value); \
11377         pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11378 } while (0)
11379#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11380
11381#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11382#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11383#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11384 do { \
11385 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11386 *pu32Reg -= (a_u32Value); \
11387         pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11388 } while (0)
11389#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11390#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11391
11392#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11393#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11394#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11395#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11396#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11397#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11398#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11399
11400#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11401#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11402#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11403#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11404
11405#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11406#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11407#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11408
11409#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11410#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11411#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11412
11413#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11414#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11415#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11416
11417#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11418#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11419#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11420
11421#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11422
11423#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11424
11425#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11426#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11427#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11428 do { \
11429 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11430 *pu32Reg &= (a_u32Value); \
11431         pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11432 } while (0)
11433#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11434
11435#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11436#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11437#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11438 do { \
11439 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11440 *pu32Reg |= (a_u32Value); \
11441         pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11442 } while (0)
11443#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11444
11445
11446/** @note Not for IOPL or IF modification. */
11447#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11448/** @note Not for IOPL or IF modification. */
11449#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11450/** @note Not for IOPL or IF modification. */
11451#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11452
11453#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11454
11455/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11456#define IEM_MC_FPU_TO_MMX_MODE() do { \
11457 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11458 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11459 } while (0)
11460
11461/** Switches the FPU state from MMX mode (FTW=0xffff). */
11462#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11463 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11464 } while (0)
11465
11466#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11467 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11468#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11469 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11470#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11471 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11472 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11473 } while (0)
11474#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11475 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11476 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11477 } while (0)
11478#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11479 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11480#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11481 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11482#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11483 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11484
11485#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11486 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11487 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11488 } while (0)
11489#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11490 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11491#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11492 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11493#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11494 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11495#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11496 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11497 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11498 } while (0)
11499#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11500 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11501#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11502 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11503 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11504 } while (0)
11505#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11506 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11507#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11508 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11509 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11510 } while (0)
11511#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11512 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11513#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11514 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11515#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11516 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11517#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11518 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11519#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11520 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11521 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11522 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11523 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11524 } while (0)
11525
11526#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11527 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11528 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11529 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11530 } while (0)
11531#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11532 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11533 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11534 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11535 } while (0)
11536#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11539 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11540 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11541 } while (0)
11542#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11543 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11547 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11548 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11549 } while (0)
11550
11551#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11552#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11553 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11554 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11560 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11561 } while (0)
11562#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11563 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11564 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11572 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11578 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11579 } while (0)
11580#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11581 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11587 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11588 } while (0)
11589
11590#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11591 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11592#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11593 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11594#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11595 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11596#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11597 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11598 uintptr_t const iYRegTmp = (a_iYReg); \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11602 } while (0)
11603
11604#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11605 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11606 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11607 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11613 } while (0)
11614#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11615 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11616 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11617 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11622 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11623 } while (0)
11624#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11625 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11626 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11627 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11632 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11633 } while (0)
11634
11635#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11636 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11637 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11638 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11639 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11641 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11642 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11643 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11644 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11645 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11646 } while (0)
11647#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11648 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11649 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11650 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11651 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11653 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11655 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11656 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11657 } while (0)
11658#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11659 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11660 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11661 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11662 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11664 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11666 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11667 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11668 } while (0)
11669#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11670 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11671 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11672 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11673 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11674 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11675 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11676 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11677 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11678 } while (0)
11679
11680#ifndef IEM_WITH_SETJMP
11681# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11682 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11683# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11685# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11687#else
11688# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11689 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11690# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11691 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11692# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11693 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11694#endif
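
/*
 * Usage note (hedged): in both builds the fetch macros are used as plain
 * statements inside an IEM_MC_BEGIN/IEM_MC_END block, e.g.
 *
 *     IEM_MC_FETCH_MEM_U8(u8Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *
 * with u8Value and GCPtrEffSrc being IEM_MC_LOCAL variables.  Without
 * IEM_WITH_SETJMP a failure returns the strict status code from the enclosing
 * function (via IEM_MC_RETURN_ON_FAILURE); with IEM_WITH_SETJMP the Jmp
 * fetchers are expected to unwind via longjmp instead, so no status code
 * plumbing is needed at the call site.
 */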
11695
11696#ifndef IEM_WITH_SETJMP
11697# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11703#else
11704# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11705 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11706# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11707 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11708# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11709 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11710#endif
11711
11712#ifndef IEM_WITH_SETJMP
11713# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11717# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11719#else
11720# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11721 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11722# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11723 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11724# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11725 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11726#endif
11727
11728#ifdef SOME_UNUSED_FUNCTION
11729# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11731#endif
11732
11733#ifndef IEM_WITH_SETJMP
11734# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11736# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11738# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11740# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11742#else
11743# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11746 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11747# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751#endif
11752
11753#ifndef IEM_WITH_SETJMP
11754# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11760#else
11761# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11766 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11767#endif
11768
11769#ifndef IEM_WITH_SETJMP
11770# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11772# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11774#else
11775# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11776 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11777# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11778 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11779#endif
11780
11781#ifndef IEM_WITH_SETJMP
11782# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11784# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11786#else
11787# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11788 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11789# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11790 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11791#endif
11792
11793
11794
11795#ifndef IEM_WITH_SETJMP
11796# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11797 do { \
11798 uint8_t u8Tmp; \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11800 (a_u16Dst) = u8Tmp; \
11801 } while (0)
11802# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint8_t u8Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u32Dst) = u8Tmp; \
11807 } while (0)
11808# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint8_t u8Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u64Dst) = u8Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint16_t u16Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u32Dst) = u16Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint16_t u16Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u64Dst) = u16Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint32_t u32Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u64Dst) = u32Tmp; \
11831 } while (0)
11832#else /* IEM_WITH_SETJMP */
11833# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11834 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11835# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11838 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11839# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845#endif /* IEM_WITH_SETJMP */
11846
11847#ifndef IEM_WITH_SETJMP
11848# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11849 do { \
11850 uint8_t u8Tmp; \
11851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11852 (a_u16Dst) = (int8_t)u8Tmp; \
11853 } while (0)
11854# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11855 do { \
11856 uint8_t u8Tmp; \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11858 (a_u32Dst) = (int8_t)u8Tmp; \
11859 } while (0)
11860# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11861 do { \
11862 uint8_t u8Tmp; \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11864 (a_u64Dst) = (int8_t)u8Tmp; \
11865 } while (0)
11866# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11867 do { \
11868 uint16_t u16Tmp; \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11870 (a_u32Dst) = (int16_t)u16Tmp; \
11871 } while (0)
11872# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11873 do { \
11874 uint16_t u16Tmp; \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11876 (a_u64Dst) = (int16_t)u16Tmp; \
11877 } while (0)
11878# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11879 do { \
11880 uint32_t u32Tmp; \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11882 (a_u64Dst) = (int32_t)u32Tmp; \
11883 } while (0)
11884#else /* IEM_WITH_SETJMP */
11885# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11886 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11887# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11888 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11889# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11890 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11891# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11892 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11893# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11896 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11897#endif /* IEM_WITH_SETJMP */
11898
11899#ifndef IEM_WITH_SETJMP
11900# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11902# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11904# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11906# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11908#else
11909# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11910 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11911# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11912 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11913# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11914 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11915# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11916 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11917#endif
11918
11919#ifndef IEM_WITH_SETJMP
11920# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11922# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11924# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11926# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11928#else
11929# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11930 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11931# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11932 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11933# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11934 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11935# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11936 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11937#endif
11938
11939#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11940#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11941#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11942#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11943#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11944#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11945#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11946 do { \
11947 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11948 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11949 } while (0)
11950
11951#ifndef IEM_WITH_SETJMP
11952# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11954# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11956#else
11957# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11958 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11959# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11960 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11961#endif
11962
11963#ifndef IEM_WITH_SETJMP
11964# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11965 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11966# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11968#else
11969# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11970 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11971# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11972 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11973#endif
11974
11975
11976#define IEM_MC_PUSH_U16(a_u16Value) \
11977 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11978#define IEM_MC_PUSH_U32(a_u32Value) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11980#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11982#define IEM_MC_PUSH_U64(a_u64Value) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11984
11985#define IEM_MC_POP_U16(a_pu16Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11987#define IEM_MC_POP_U32(a_pu32Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11989#define IEM_MC_POP_U64(a_pu64Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11991
11992/** Maps guest memory for direct or bounce buffered access.
11993 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11994 * @remarks May return.
11995 */
11996#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11997 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11998
11999/** Maps guest memory for direct or bounce buffered access.
12000 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12001 * @remarks May return.
12002 */
12003#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12004 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12005
12006/** Commits the memory and unmaps the guest memory.
12007 * @remarks May return.
12008 */
12009#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12010 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
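
/* Illustrative usage sketch (not part of the original sources): a typical
 * read-modify-write memory operand inside an IEM_MC_BEGIN/IEM_MC_END block is
 * mapped, operated on and committed roughly like this. pu32Dst, u32Src,
 * GCPtrEffDst and pfnAImpl are hypothetical locals/arguments; IEM_ACCESS_DATA_RW
 * is assumed to be the usual read-write access flag.
 *
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_CALL_VOID_AIMPL_2(pfnAImpl, pu32Dst, u32Src);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */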
12011
12012/** Commits the memory and unmaps the guest memory unless the FPU status word
12013 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
12014 * that would cause FLD not to store.
12015 *
12016 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12017 * store, while \#P will not.
12018 *
12019 * @remarks May in theory return - for now.
12020 */
12021#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12022 do { \
12023 if ( !(a_u16FSW & X86_FSW_ES) \
12024 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12025 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12026 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12027 } while (0)
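
/* Worked example (informal reading of the condition above): with X86_FSW_ES and
 * X86_FSW_IE set in a_u16FSW and X86_FCW_IM clear in the guest FCW (invalid
 * operation unmasked), both clauses are false and the commit is skipped. With
 * only the precision flag raised, the second clause holds regardless of masking
 * because PE is not among the checked bits, so the store is committed. */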
12028
12029/** Calculate the effective address from R/M. */
12030#ifndef IEM_WITH_SETJMP
12031# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12032 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12033#else
12034# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12035 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12036#endif
12037
12038#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12039#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12040#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12041#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12042#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12043#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12044#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12045
12046/**
12047 * Defers the rest of the instruction emulation to a C implementation routine
12048 * and returns, only taking the standard parameters.
12049 *
12050 * @param a_pfnCImpl The pointer to the C routine.
12051 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12052 */
12053#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12054
12055/**
12056 * Defers the rest of instruction emulation to a C implementation routine and
12057 * returns, taking one argument in addition to the standard ones.
12058 *
12059 * @param a_pfnCImpl The pointer to the C routine.
12060 * @param a0 The argument.
12061 */
12062#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12063
12064/**
12065 * Defers the rest of the instruction emulation to a C implementation routine
12066 * and returns, taking two arguments in addition to the standard ones.
12067 *
12068 * @param a_pfnCImpl The pointer to the C routine.
12069 * @param a0 The first extra argument.
12070 * @param a1 The second extra argument.
12071 */
12072#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12073
12074/**
12075 * Defers the rest of the instruction emulation to a C implementation routine
12076 * and returns, taking three arguments in addition to the standard ones.
12077 *
12078 * @param a_pfnCImpl The pointer to the C routine.
12079 * @param a0 The first extra argument.
12080 * @param a1 The second extra argument.
12081 * @param a2 The third extra argument.
12082 */
12083#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12084
12085/**
12086 * Defers the rest of the instruction emulation to a C implementation routine
12087 * and returns, taking four arguments in addition to the standard ones.
12088 *
12089 * @param a_pfnCImpl The pointer to the C routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 * @param a3 The fourth extra argument.
12094 */
12095#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12096
12097/**
12098 * Defers the rest of the instruction emulation to a C implementation routine
12099 * and returns, taking five arguments in addition to the standard ones.
12100 *
12101 * @param a_pfnCImpl The pointer to the C routine.
12102 * @param a0 The first extra argument.
12103 * @param a1 The second extra argument.
12104 * @param a2 The third extra argument.
12105 * @param a3 The fourth extra argument.
12106 * @param a4 The fifth extra argument.
12107 */
12108#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12109
12110/**
12111 * Defers the entire instruction emulation to a C implementation routine and
12112 * returns, only taking the standard parameters.
12113 *
12114 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12115 *
12116 * @param a_pfnCImpl The pointer to the C routine.
12117 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12118 */
12119#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12120
12121/**
12122 * Defers the entire instruction emulation to a C implementation routine and
12123 * returns, taking one argument in addition to the standard ones.
12124 *
12125 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12126 *
12127 * @param a_pfnCImpl The pointer to the C routine.
12128 * @param a0 The argument.
12129 */
12130#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12131
12132/**
12133 * Defers the entire instruction emulation to a C implementation routine and
12134 * returns, taking two arguments in addition to the standard ones.
12135 *
12136 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12137 *
12138 * @param a_pfnCImpl The pointer to the C routine.
12139 * @param a0 The first extra argument.
12140 * @param a1 The second extra argument.
12141 */
12142#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12143
12144/**
12145 * Defers the entire instruction emulation to a C implementation routine and
12146 * returns, taking three arguments in addition to the standard ones.
12147 *
12148 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12149 *
12150 * @param a_pfnCImpl The pointer to the C routine.
12151 * @param a0 The first extra argument.
12152 * @param a1 The second extra argument.
12153 * @param a2 The third extra argument.
12154 */
12155#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
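
/* Illustrative usage sketch (not from the original decoder sources): an opcode
 * decoder for an instruction implemented entirely in C would typically finish
 * with a deferral like the one below; iemCImpl_SomeInsn is a hypothetical C
 * implementation routine defined via IEM_CIMPL_DEF_0.
 *
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeInsn);
 */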
12156
12157/**
12158 * Calls a FPU assembly implementation taking one visible argument.
12159 *
12160 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12161 * @param a0 The first extra argument.
12162 */
12163#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12164 do { \
12165 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12166 } while (0)
12167
12168/**
12169 * Calls a FPU assembly implementation taking two visible arguments.
12170 *
12171 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12172 * @param a0 The first extra argument.
12173 * @param a1 The second extra argument.
12174 */
12175#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12176 do { \
12177 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12178 } while (0)
12179
12180/**
12181 * Calls a FPU assembly implementation taking three visible arguments.
12182 *
12183 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12184 * @param a0 The first extra argument.
12185 * @param a1 The second extra argument.
12186 * @param a2 The third extra argument.
12187 */
12188#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12189 do { \
12190 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12191 } while (0)
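
/* Illustrative usage sketch (not from the original decoder sources): a binary
 * x87 operation would typically call its assembly worker with a result local
 * and two register references; pfnAImpl, FpuRes, pFpuRes, pr80Value1 and
 * pr80Value2 are hypothetical IEM_MC locals/arguments. The x87 state pointer
 * is supplied implicitly by the macro.
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */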
12192
12193#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12194 do { \
12195 (a_FpuData).FSW = (a_FSW); \
12196 (a_FpuData).r80Result = *(a_pr80Value); \
12197 } while (0)
12198
12199/** Pushes FPU result onto the stack. */
12200#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12201 iemFpuPushResult(pVCpu, &a_FpuData)
12202/** Pushes FPU result onto the stack and sets the FPUDP. */
12203#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12204 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12205
12206/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12207#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12208 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12209
12210/** Stores FPU result in a stack register. */
12211#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12212 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12213/** Stores FPU result in a stack register and pops the stack. */
12214#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12215 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12216/** Stores FPU result in a stack register and sets the FPUDP. */
12217#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12218 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12219/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12220 * stack. */
12221#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12222 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12223
12224/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12225#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12226 iemFpuUpdateOpcodeAndIp(pVCpu)
12227/** Free a stack register (for FFREE and FFREEP). */
12228#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12229 iemFpuStackFree(pVCpu, a_iStReg)
12230/** Increment the FPU stack pointer. */
12231#define IEM_MC_FPU_STACK_INC_TOP() \
12232 iemFpuStackIncTop(pVCpu)
12233/** Decrement the FPU stack pointer. */
12234#define IEM_MC_FPU_STACK_DEC_TOP() \
12235 iemFpuStackDecTop(pVCpu)
12236
12237/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12238#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12239 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12240/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12241#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12242 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12243/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12244#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12245 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12246/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12247#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12248 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12249/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12250 * stack. */
12251#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12252 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12253/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12254#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12255 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12256
12257/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12258#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12259 iemFpuStackUnderflow(pVCpu, a_iStDst)
12260/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12261 * stack. */
12262#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12263 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12264/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12265 * FPUDS. */
12266#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12267 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12268/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12269 * FPUDS. Pops stack. */
12270#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12271 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12272/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12273 * stack twice. */
12274#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12275 iemFpuStackUnderflowThenPopPop(pVCpu)
12276/** Raises a FPU stack underflow exception for an instruction pushing a result
12277 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12278#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12279 iemFpuStackPushUnderflow(pVCpu)
12280/** Raises a FPU stack underflow exception for an instruction pushing a result
12281 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12282#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12283 iemFpuStackPushUnderflowTwo(pVCpu)
12284
12285/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12286 * FPUIP, FPUCS and FOP. */
12287#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12288 iemFpuStackPushOverflow(pVCpu)
12289/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12290 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12291#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12292 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12293/** Prepares for using the FPU state.
12294 * Ensures that we can use the host FPU in the current context (RC+R0).
12295 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12296#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12297/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12298#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12299/** Actualizes the guest FPU state so it can be accessed and modified. */
12300#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12301
12302/** Prepares for using the SSE state.
12303 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12304 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12305#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12306/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12307#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12308/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12309#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12310
12311/** Prepares for using the AVX state.
12312 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12313 * Ensures the guest AVX state in the CPUMCTX is up to date.
12314 * @note This will include the AVX512 state too when support for it is added
12315 * due to the zero-extending feature of VEX instructions. */
12316#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12317/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12318#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12319/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12320#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12321
12322/**
12323 * Calls a MMX assembly implementation taking two visible arguments.
12324 *
12325 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12326 * @param a0 The first extra argument.
12327 * @param a1 The second extra argument.
12328 */
12329#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12330 do { \
12331 IEM_MC_PREPARE_FPU_USAGE(); \
12332 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12333 } while (0)
12334
12335/**
12336 * Calls a MMX assembly implementation taking three visible arguments.
12337 *
12338 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12339 * @param a0 The first extra argument.
12340 * @param a1 The second extra argument.
12341 * @param a2 The third extra argument.
12342 */
12343#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12344 do { \
12345 IEM_MC_PREPARE_FPU_USAGE(); \
12346 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12347 } while (0)
12348
12349
12350/**
12351 * Calls a SSE assembly implementation taking two visible arguments.
12352 *
12353 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12354 * @param a0 The first extra argument.
12355 * @param a1 The second extra argument.
12356 */
12357#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12358 do { \
12359 IEM_MC_PREPARE_SSE_USAGE(); \
12360 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12361 } while (0)
12362
12363/**
12364 * Calls a SSE assembly implementation taking three visible arguments.
12365 *
12366 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12367 * @param a0 The first extra argument.
12368 * @param a1 The second extra argument.
12369 * @param a2 The third extra argument.
12370 */
12371#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12372 do { \
12373 IEM_MC_PREPARE_SSE_USAGE(); \
12374 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12375 } while (0)
12376
12377
12378/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12379 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12380#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12381 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12382
12383/**
12384 * Calls a AVX assembly implementation taking two visible arguments.
12385 *
12386 * There is one implicit zeroth argument, a pointer to the extended state.
12387 *
12388 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12389 * @param a1 The first extra argument.
12390 * @param a2 The second extra argument.
12391 */
12392#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12393 do { \
12394 IEM_MC_PREPARE_AVX_USAGE(); \
12395 a_pfnAImpl(pXState, (a1), (a2)); \
12396 } while (0)
12397
12398/**
12399 * Calls a AVX assembly implementation taking three visible arguments.
12400 *
12401 * There is one implicit zeroth argument, a pointer to the extended state.
12402 *
12403 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12404 * @param a1 The first extra argument.
12405 * @param a2 The second extra argument.
12406 * @param a3 The third extra argument.
12407 */
12408#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12409 do { \
12410 IEM_MC_PREPARE_AVX_USAGE(); \
12411 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12412 } while (0)
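
/* Illustrative usage sketch (not from the original decoder sources): the
 * implicit zeroth argument is declared with IEM_MC_IMPLICIT_AVX_AIMPL_ARGS
 * before the call; pfnAImpl, puDst and puSrc are hypothetical arguments.
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, puDst, puSrc);
 */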
12413
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12424 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12425 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12428 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12429 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12430/** @note Not for IOPL or IF testing. */
12431#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12432 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12433 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12434 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12437 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12438 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12439 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12440#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12441#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12442#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12443/** @note Not for IOPL or IF testing. */
12444#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12445 if ( pVCpu->cpum.GstCtx.cx != 0 \
12446 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12447/** @note Not for IOPL or IF testing. */
12448#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12449 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12450 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12451/** @note Not for IOPL or IF testing. */
12452#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12453 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12454 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12455/** @note Not for IOPL or IF testing. */
12456#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12457 if ( pVCpu->cpum.GstCtx.cx != 0 \
12458 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12459/** @note Not for IOPL or IF testing. */
12460#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12461 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12462 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12463/** @note Not for IOPL or IF testing. */
12464#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12465 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12466 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12467#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12468#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12469
12470#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12471 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12472#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12473 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12474#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12475 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12476#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12477 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12478#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12479 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12480#define IEM_MC_IF_FCW_IM() \
12481 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12482
12483#define IEM_MC_ELSE() } else {
12484#define IEM_MC_ENDIF() } do {} while (0)
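
/* Illustrative usage sketch (not from the original decoder sources): the
 * IEM_MC_IF_* macros open a block that must be closed with IEM_MC_ENDIF, with
 * an optional IEM_MC_ELSE in between, e.g.:
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         ... microcode statements for the flag-set case ...
 *     IEM_MC_ELSE()
 *         ... microcode statements for the flag-clear case ...
 *     IEM_MC_ENDIF();
 */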
12485
12486/** @} */
12487
12488
12489/** @name Opcode Debug Helpers.
12490 * @{
12491 */
12492#ifdef VBOX_WITH_STATISTICS
12493# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12494#else
12495# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12496#endif
12497
12498#ifdef DEBUG
12499# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12500 do { \
12501 IEMOP_INC_STATS(a_Stats); \
12502 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12503 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12504 } while (0)
12505
12506# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12507 do { \
12508 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12509 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12510 (void)RT_CONCAT(OP_,a_Upper); \
12511 (void)(a_fDisHints); \
12512 (void)(a_fIemHints); \
12513 } while (0)
12514
12515# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12516 do { \
12517 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12518 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12519 (void)RT_CONCAT(OP_,a_Upper); \
12520 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12521 (void)(a_fDisHints); \
12522 (void)(a_fIemHints); \
12523 } while (0)
12524
12525# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12526 do { \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12528 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12529 (void)RT_CONCAT(OP_,a_Upper); \
12530 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12532 (void)(a_fDisHints); \
12533 (void)(a_fIemHints); \
12534 } while (0)
12535
12536# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12537 do { \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12539 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12540 (void)RT_CONCAT(OP_,a_Upper); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12543 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12544 (void)(a_fDisHints); \
12545 (void)(a_fIemHints); \
12546 } while (0)
12547
12548# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12549 do { \
12550 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12551 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12552 (void)RT_CONCAT(OP_,a_Upper); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12555 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12556 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12557 (void)(a_fDisHints); \
12558 (void)(a_fIemHints); \
12559 } while (0)
12560
12561#else
12562# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12563
12564# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12565 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12566# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12567 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12568# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12569 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12570# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12572# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12574
12575#endif
12576
12577#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC0EX(a_Lower, \
12579 #a_Lower, \
12580 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12581#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12583 #a_Lower " " #a_Op1, \
12584 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12585#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12586 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12587 #a_Lower " " #a_Op1 "," #a_Op2, \
12588 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12589#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12590 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12591 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12592 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12593#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12594 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12595 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12596 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
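
/* Illustrative usage sketch (not from the original decoder sources): a decoder
 * function would typically start with one of these wrappers, e.g. for a
 * two-operand reg,r/m form. The form, operand and hint arguments are
 * placeholders that have to correspond to existing IEMOPFORM_, OP_, OP_PARM_
 * and disassembler/IEM hint constants.
 *
 *     IEMOP_MNEMONIC2(RM, ADD, add, Gb, Eb, DISOPTYPE_HARMLESS, 0);
 */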
12597
12598/** @} */
12599
12600
12601/** @name Opcode Helpers.
12602 * @{
12603 */
12604
12605#ifdef IN_RING3
12606# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12607 do { \
12608 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12609 else \
12610 { \
12611 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12612 return IEMOP_RAISE_INVALID_OPCODE(); \
12613 } \
12614 } while (0)
12615#else
12616# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12617 do { \
12618 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12619 else return IEMOP_RAISE_INVALID_OPCODE(); \
12620 } while (0)
12621#endif
12622
12623/** The instruction requires a 186 or later. */
12624#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12625# define IEMOP_HLP_MIN_186() do { } while (0)
12626#else
12627# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12628#endif
12629
12630/** The instruction requires a 286 or later. */
12631#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12632# define IEMOP_HLP_MIN_286() do { } while (0)
12633#else
12634# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12635#endif
12636
12637/** The instruction requires a 386 or later. */
12638#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12639# define IEMOP_HLP_MIN_386() do { } while (0)
12640#else
12641# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12642#endif
12643
12644/** The instruction requires a 386 or later if the given expression is true. */
12645#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12646# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12647#else
12648# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12649#endif
12650
12651/** The instruction requires a 486 or later. */
12652#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12653# define IEMOP_HLP_MIN_486() do { } while (0)
12654#else
12655# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12656#endif
12657
12658/** The instruction requires a Pentium (586) or later. */
12659#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12660# define IEMOP_HLP_MIN_586() do { } while (0)
12661#else
12662# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12663#endif
12664
12665/** The instruction requires a PentiumPro (686) or later. */
12666#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12667# define IEMOP_HLP_MIN_686() do { } while (0)
12668#else
12669# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12670#endif
12671
12672
12673/** The instruction raises an \#UD in real and V8086 mode. */
12674#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12675 do \
12676 { \
12677 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12678 else return IEMOP_RAISE_INVALID_OPCODE(); \
12679 } while (0)
12680
12681#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12682/** The instruction raises an \#UD in real and V8086 mode, or when in long
12683 * mode without using a 64-bit code segment (applicable to all VMX
12684 * instructions except VMCALL).
12685 */
12686#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12687 do \
12688 { \
12689 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12690 && ( !IEM_IS_LONG_MODE(pVCpu) \
12691 || IEM_IS_64BIT_CODE(pVCpu))) \
12692 { /* likely */ } \
12693 else \
12694 { \
12695 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12696 { \
12697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12698 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12699 return IEMOP_RAISE_INVALID_OPCODE(); \
12700 } \
12701 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12702 { \
12703 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12704 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12705 return IEMOP_RAISE_INVALID_OPCODE(); \
12706 } \
12707 } \
12708 } while (0)
12709
12710/** The instruction can only be executed in VMX operation (VMX root mode and
12711 * non-root mode).
12712 *
12713 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12714 */
12715# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12716 do \
12717 { \
12718 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12719 else \
12720 { \
12721 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12722 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12723 return IEMOP_RAISE_INVALID_OPCODE(); \
12724 } \
12725 } while (0)
12726#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12727
12728/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12729 * 64-bit mode. */
12730#define IEMOP_HLP_NO_64BIT() \
12731 do \
12732 { \
12733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12734 return IEMOP_RAISE_INVALID_OPCODE(); \
12735 } while (0)
12736
12737/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12738 * 64-bit mode. */
12739#define IEMOP_HLP_ONLY_64BIT() \
12740 do \
12741 { \
12742 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12743 return IEMOP_RAISE_INVALID_OPCODE(); \
12744 } while (0)
12745
12746/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
12747#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12748 do \
12749 { \
12750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12751 iemRecalEffOpSize64Default(pVCpu); \
12752 } while (0)
12753
12754/** The instruction has 64-bit operand size if in 64-bit mode. */
12755#define IEMOP_HLP_64BIT_OP_SIZE() \
12756 do \
12757 { \
12758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12759 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12760 } while (0)
12761
12762/** Only a REX prefix immediately preceding the first opcode byte takes
12763 * effect. This macro helps ensure this as well as log bad guest code. */
12764#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12765 do \
12766 { \
12767 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12768 { \
12769 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12770 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12771 pVCpu->iem.s.uRexB = 0; \
12772 pVCpu->iem.s.uRexIndex = 0; \
12773 pVCpu->iem.s.uRexReg = 0; \
12774 iemRecalEffOpSize(pVCpu); \
12775 } \
12776 } while (0)
12777
12778/**
12779 * Done decoding.
12780 */
12781#define IEMOP_HLP_DONE_DECODING() \
12782 do \
12783 { \
12784 /*nothing for now, maybe later... */ \
12785 } while (0)
12786
12787/**
12788 * Done decoding, raise \#UD exception if lock prefix present.
12789 */
12790#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12791 do \
12792 { \
12793 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12794 { /* likely */ } \
12795 else \
12796 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12797 } while (0)
12798
12799
12800/**
12801 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12802 * repnz or size prefixes are present, or if in real or v8086 mode.
12803 */
12804#define IEMOP_HLP_DONE_VEX_DECODING() \
12805 do \
12806 { \
12807 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12808 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12809 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12810 { /* likely */ } \
12811 else \
12812 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12813 } while (0)
12814
12815/**
12816 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12817 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12818 */
12819#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12820 do \
12821 { \
12822 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12823 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12824 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12825 && pVCpu->iem.s.uVexLength == 0)) \
12826 { /* likely */ } \
12827 else \
12828 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12829 } while (0)
12830
12831
12832/**
12833 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12834 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12835 * register 0, or if in real or v8086 mode.
12836 */
12837#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12838 do \
12839 { \
12840 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12841 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12842 && !pVCpu->iem.s.uVex3rdReg \
12843 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12844 { /* likely */ } \
12845 else \
12846 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12847 } while (0)
12848
12849/**
12850 * Done decoding VEX, no V, L=0.
12851 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12852 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12853 */
12854#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12855 do \
12856 { \
12857 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12858 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12859 && pVCpu->iem.s.uVexLength == 0 \
12860 && pVCpu->iem.s.uVex3rdReg == 0 \
12861 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12862 { /* likely */ } \
12863 else \
12864 return IEMOP_RAISE_INVALID_OPCODE(); \
12865 } while (0)
12866
12867#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12868 do \
12869 { \
12870 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12871 { /* likely */ } \
12872 else \
12873 { \
12874 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12875 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12876 } \
12877 } while (0)
12878#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12879 do \
12880 { \
12881 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12882 { /* likely */ } \
12883 else \
12884 { \
12885 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12886 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12887 } \
12888 } while (0)
12889
12890/**
12891 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12892 * are present.
12893 */
12894#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12895 do \
12896 { \
12897 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12898 { /* likely */ } \
12899 else \
12900 return IEMOP_RAISE_INVALID_OPCODE(); \
12901 } while (0)
12902
12903/**
12904 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12905 * prefixes are present.
12906 */
12907#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12908 do \
12909 { \
12910 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12911 { /* likely */ } \
12912 else \
12913 return IEMOP_RAISE_INVALID_OPCODE(); \
12914 } while (0)
12915
12916
12917/**
12918 * Calculates the effective address of a ModR/M memory operand.
12919 *
12920 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12921 *
12922 * @return Strict VBox status code.
12923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12924 * @param bRm The ModRM byte.
12925 * @param cbImm The size of any immediate following the
12926 * effective address opcode bytes. Important for
12927 * RIP relative addressing.
12928 * @param pGCPtrEff Where to return the effective address.
12929 */
12930IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12931{
12932 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12933# define SET_SS_DEF() \
12934 do \
12935 { \
12936 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12937 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12938 } while (0)
12939
12940 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12941 {
12942/** @todo Check the effective address size crap! */
12943 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12944 {
12945 uint16_t u16EffAddr;
12946
12947 /* Handle the disp16 form with no registers first. */
12948 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12949 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12950 else
12951 {
12952                /* Get the displacement. */
12953 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12954 {
12955 case 0: u16EffAddr = 0; break;
12956 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12957 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12958 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12959 }
12960
12961 /* Add the base and index registers to the disp. */
12962 switch (bRm & X86_MODRM_RM_MASK)
12963 {
12964 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12965 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12966 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12967 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12968 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12969 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12970 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12971 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12972 }
12973 }
12974
12975 *pGCPtrEff = u16EffAddr;
12976 }
12977 else
12978 {
12979 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12980 uint32_t u32EffAddr;
12981
12982 /* Handle the disp32 form with no registers first. */
12983 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12984 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12985 else
12986 {
12987 /* Get the register (or SIB) value. */
12988 switch ((bRm & X86_MODRM_RM_MASK))
12989 {
12990 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12991 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12992 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12993 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12994 case 4: /* SIB */
12995 {
12996 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12997
12998 /* Get the index and scale it. */
12999 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13000 {
13001 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13002 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13003 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13004 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13005 case 4: u32EffAddr = 0; /*none */ break;
13006 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13007 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13008 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13009 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13010 }
13011 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13012
13013 /* add base */
13014 switch (bSib & X86_SIB_BASE_MASK)
13015 {
13016 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13017 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13018 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13019 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13020 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13021 case 5:
13022 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13023 {
13024 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13025 SET_SS_DEF();
13026 }
13027 else
13028 {
13029 uint32_t u32Disp;
13030 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13031 u32EffAddr += u32Disp;
13032 }
13033 break;
13034 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13035 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13036 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13037 }
13038 break;
13039 }
13040 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13041 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13042 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13044 }
13045
13046 /* Get and add the displacement. */
13047 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13048 {
13049 case 0:
13050 break;
13051 case 1:
13052 {
13053 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13054 u32EffAddr += i8Disp;
13055 break;
13056 }
13057 case 2:
13058 {
13059 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13060 u32EffAddr += u32Disp;
13061 break;
13062 }
13063 default:
13064 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13065 }
13066
13067 }
13068 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13069 *pGCPtrEff = u32EffAddr;
13070 else
13071 {
13072 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13073 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13074 }
13075 }
13076 }
13077 else
13078 {
13079 uint64_t u64EffAddr;
13080
13081 /* Handle the rip+disp32 form with no registers first. */
13082 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13083 {
13084 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13085 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13086 }
13087 else
13088 {
13089 /* Get the register (or SIB) value. */
13090 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13091 {
13092 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13093 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13094 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13095 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13096 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13097 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13098 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13099 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13100 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13101 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13102 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13103 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13104 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13105 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13106 /* SIB */
13107 case 4:
13108 case 12:
13109 {
13110 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13111
13112 /* Get the index and scale it. */
13113 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13114 {
13115 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13116 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13117 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13118 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13119 case 4: u64EffAddr = 0; /*none */ break;
13120 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13121 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13122 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13123 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13124 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13125 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13126 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13127 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13128 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13129 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13130 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13131 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13132 }
13133 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13134
13135 /* add base */
13136 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13137 {
13138 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13139 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13140 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13141 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13142 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13143 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13144 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13145 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13146 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13147 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13148 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13149 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13150 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13151 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13152 /* complicated encodings */
13153 case 5:
13154 case 13:
13155 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13156 {
13157 if (!pVCpu->iem.s.uRexB)
13158 {
13159 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13160 SET_SS_DEF();
13161 }
13162 else
13163 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13164 }
13165 else
13166 {
13167 uint32_t u32Disp;
13168 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13169 u64EffAddr += (int32_t)u32Disp;
13170 }
13171 break;
13172 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13173 }
13174 break;
13175 }
13176 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13177 }
13178
13179 /* Get and add the displacement. */
13180 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13181 {
13182 case 0:
13183 break;
13184 case 1:
13185 {
13186 int8_t i8Disp;
13187 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13188 u64EffAddr += i8Disp;
13189 break;
13190 }
13191 case 2:
13192 {
13193 uint32_t u32Disp;
13194 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13195 u64EffAddr += (int32_t)u32Disp;
13196 break;
13197 }
13198 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13199 }
13200
13201 }
13202
13203 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13204 *pGCPtrEff = u64EffAddr;
13205 else
13206 {
13207 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13208 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13209 }
13210 }
13211
13212 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13213 return VINF_SUCCESS;
13214}
13215
13216
13217/**
13218 * Calculates the effective address of a ModR/M memory operand.
13219 *
13220 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13221 *
13222 * @return Strict VBox status code.
13223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13224 * @param bRm The ModRM byte.
13225 * @param cbImm The size of any immediate following the
13226 * effective address opcode bytes. Important for
13227 * RIP relative addressing.
13228 * @param pGCPtrEff Where to return the effective address.
13229 * @param offRsp RSP displacement.
13230 */
13231IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13232{
13233 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13234# define SET_SS_DEF() \
13235 do \
13236 { \
13237 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13238 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13239 } while (0)
13240
13241 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13242 {
13243/** @todo Check the effective address size crap! */
13244 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13245 {
13246 uint16_t u16EffAddr;
13247
13248 /* Handle the disp16 form with no registers first. */
13249 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13250 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13251 else
13252 {
13253 /* Get the displacement. */
13254 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13255 {
13256 case 0: u16EffAddr = 0; break;
13257 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13258 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13259 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13260 }
13261
13262 /* Add the base and index registers to the disp. */
13263 switch (bRm & X86_MODRM_RM_MASK)
13264 {
13265 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13266 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13267 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13268 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13269 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13270 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13271 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13272 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13273 }
13274 }
13275
13276 *pGCPtrEff = u16EffAddr;
13277 }
13278 else
13279 {
13280 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13281 uint32_t u32EffAddr;
13282
13283 /* Handle the disp32 form with no registers first. */
13284 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13285 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13286 else
13287 {
13288 /* Get the register (or SIB) value. */
13289 switch ((bRm & X86_MODRM_RM_MASK))
13290 {
13291 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13292 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13293 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13294 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13295 case 4: /* SIB */
13296 {
13297 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13298
13299 /* Get the index and scale it. */
13300 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13301 {
13302 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13303 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13304 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13305 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13306 case 4: u32EffAddr = 0; /*none */ break;
13307 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13308 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13309 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13310 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13311 }
13312 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13313
13314 /* add base */
13315 switch (bSib & X86_SIB_BASE_MASK)
13316 {
13317 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13318 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13319 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13320 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13321 case 4:
13322 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13323 SET_SS_DEF();
13324 break;
13325 case 5:
13326 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13327 {
13328 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13329 SET_SS_DEF();
13330 }
13331 else
13332 {
13333 uint32_t u32Disp;
13334 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13335 u32EffAddr += u32Disp;
13336 }
13337 break;
13338 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13339 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13340 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13341 }
13342 break;
13343 }
13344 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13345 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13346 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13347 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13348 }
13349
13350 /* Get and add the displacement. */
13351 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13352 {
13353 case 0:
13354 break;
13355 case 1:
13356 {
13357 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13358 u32EffAddr += i8Disp;
13359 break;
13360 }
13361 case 2:
13362 {
13363 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13364 u32EffAddr += u32Disp;
13365 break;
13366 }
13367 default:
13368 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13369 }
13370
13371 }
13372 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13373 *pGCPtrEff = u32EffAddr;
13374 else
13375 {
13376 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13377 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13378 }
13379 }
13380 }
13381 else
13382 {
13383 uint64_t u64EffAddr;
13384
13385 /* Handle the rip+disp32 form with no registers first. */
13386 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13387 {
13388 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13389 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13390 }
13391 else
13392 {
13393 /* Get the register (or SIB) value. */
13394 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13395 {
13396 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13397 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13398 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13399 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13400 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13401 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13402 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13403 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13404 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13405 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13406 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13407 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13408 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13409 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13410 /* SIB */
13411 case 4:
13412 case 12:
13413 {
13414 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13415
13416 /* Get the index and scale it. */
13417 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13418 {
13419 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13420 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13421 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13422 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13423 case 4: u64EffAddr = 0; /*none */ break;
13424 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13425 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13426 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13427 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13428 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13429 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13430 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13431 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13432 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13433 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13434 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13435 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13436 }
13437 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13438
13439 /* add base */
13440 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13441 {
13442 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13443 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13444 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13445 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13446 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13447 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13448 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13449 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13450 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13451 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13452 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13453 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13454 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13455 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13456 /* complicated encodings */
13457 case 5:
13458 case 13:
13459 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13460 {
13461 if (!pVCpu->iem.s.uRexB)
13462 {
13463 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13464 SET_SS_DEF();
13465 }
13466 else
13467 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13468 }
13469 else
13470 {
13471 uint32_t u32Disp;
13472 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13473 u64EffAddr += (int32_t)u32Disp;
13474 }
13475 break;
13476 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13477 }
13478 break;
13479 }
13480 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13481 }
13482
13483 /* Get and add the displacement. */
13484 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13485 {
13486 case 0:
13487 break;
13488 case 1:
13489 {
13490 int8_t i8Disp;
13491 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13492 u64EffAddr += i8Disp;
13493 break;
13494 }
13495 case 2:
13496 {
13497 uint32_t u32Disp;
13498 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13499 u64EffAddr += (int32_t)u32Disp;
13500 break;
13501 }
13502 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13503 }
13504
13505 }
13506
13507 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13508 *pGCPtrEff = u64EffAddr;
13509 else
13510 {
13511 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13512 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13513 }
13514 }
13515
13516 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13517 return VINF_SUCCESS;
13518}
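
/*
 * Editorial sketch (not part of the original sources): a minimal direct call
 * to iemOpHlpCalcRmEffAddrEx above.  The bRm byte and the offRsp bias of 8
 * are assumed to come from a hypothetical caller's decoder state; real
 * callers reach these helpers via the IEM_MC_CALC_RM_EFF_ADDR machinery as
 * noted in the function documentation.  cbImm is zero here, i.e. assuming no
 * immediate follows the effective address bytes.
 *
 *      RTGCPTR      GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 8);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                   // propagate opcode fetch failures
 *      // GCPtrEff now holds the effective address; RSP/ESP based addressing
 *      // has been biased by offRsp (8 in this sketch).
 */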
13519
13520
13521#ifdef IEM_WITH_SETJMP
13522/**
13523 * Calculates the effective address of a ModR/M memory operand.
13524 *
13525 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13526 *
13527 * May longjmp on internal error.
13528 *
13529 * @return The effective address.
13530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13531 * @param bRm The ModRM byte.
13532 * @param cbImm The size of any immediate following the
13533 * effective address opcode bytes. Important for
13534 * RIP relative addressing.
13535 */
13536IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13537{
13538 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13539# define SET_SS_DEF() \
13540 do \
13541 { \
13542 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13543 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13544 } while (0)
13545
13546 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13547 {
13548/** @todo Check the effective address size crap! */
13549 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13550 {
13551 uint16_t u16EffAddr;
13552
13553 /* Handle the disp16 form with no registers first. */
13554 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13555 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13556 else
13557 {
13558 /* Get the displacement. */
13559 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13560 {
13561 case 0: u16EffAddr = 0; break;
13562 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13563 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13564 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13565 }
13566
13567 /* Add the base and index registers to the disp. */
13568 switch (bRm & X86_MODRM_RM_MASK)
13569 {
13570 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13571 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13572 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13573 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13574 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13575 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13576 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13577 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13578 }
13579 }
13580
13581 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13582 return u16EffAddr;
13583 }
13584
13585 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13586 uint32_t u32EffAddr;
13587
13588 /* Handle the disp32 form with no registers first. */
13589 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13590 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13591 else
13592 {
13593 /* Get the register (or SIB) value. */
13594 switch ((bRm & X86_MODRM_RM_MASK))
13595 {
13596 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13597 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13598 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13599 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13600 case 4: /* SIB */
13601 {
13602 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13603
13604 /* Get the index and scale it. */
13605 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13606 {
13607 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13608 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13609 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13610 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13611 case 4: u32EffAddr = 0; /*none */ break;
13612 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13613 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13614 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13615 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13616 }
13617 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13618
13619 /* add base */
13620 switch (bSib & X86_SIB_BASE_MASK)
13621 {
13622 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13623 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13624 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13625 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13626 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13627 case 5:
13628 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13629 {
13630 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13631 SET_SS_DEF();
13632 }
13633 else
13634 {
13635 uint32_t u32Disp;
13636 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13637 u32EffAddr += u32Disp;
13638 }
13639 break;
13640 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13641 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13642 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13643 }
13644 break;
13645 }
13646 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13647 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13648 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13650 }
13651
13652 /* Get and add the displacement. */
13653 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13654 {
13655 case 0:
13656 break;
13657 case 1:
13658 {
13659 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13660 u32EffAddr += i8Disp;
13661 break;
13662 }
13663 case 2:
13664 {
13665 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13666 u32EffAddr += u32Disp;
13667 break;
13668 }
13669 default:
13670 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13671 }
13672 }
13673
13674 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13675 {
13676 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13677 return u32EffAddr;
13678 }
13679 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13680 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13681 return u32EffAddr & UINT16_MAX;
13682 }
13683
13684 uint64_t u64EffAddr;
13685
13686 /* Handle the rip+disp32 form with no registers first. */
13687 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13688 {
13689 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13690 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13691 }
13692 else
13693 {
13694 /* Get the register (or SIB) value. */
13695 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13696 {
13697 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13698 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13699 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13700 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13701 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13702 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13703 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13704 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13705 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13706 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13707 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13708 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13709 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13710 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13711 /* SIB */
13712 case 4:
13713 case 12:
13714 {
13715 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13716
13717 /* Get the index and scale it. */
13718 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13719 {
13720 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13721 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13722 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13723 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13724 case 4: u64EffAddr = 0; /*none */ break;
13725 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13726 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13727 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13728 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13729 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13730 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13731 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13732 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13733 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13734 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13735 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13736 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13737 }
13738 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13739
13740 /* add base */
13741 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13742 {
13743 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13744 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13745 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13746 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13747 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13748 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13749 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13750 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13751 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13752 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13753 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13754 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13755 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13756 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13757 /* complicated encodings */
13758 case 5:
13759 case 13:
13760 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13761 {
13762 if (!pVCpu->iem.s.uRexB)
13763 {
13764 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13765 SET_SS_DEF();
13766 }
13767 else
13768 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13769 }
13770 else
13771 {
13772 uint32_t u32Disp;
13773 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13774 u64EffAddr += (int32_t)u32Disp;
13775 }
13776 break;
13777 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13778 }
13779 break;
13780 }
13781 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13782 }
13783
13784 /* Get and add the displacement. */
13785 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13786 {
13787 case 0:
13788 break;
13789 case 1:
13790 {
13791 int8_t i8Disp;
13792 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13793 u64EffAddr += i8Disp;
13794 break;
13795 }
13796 case 2:
13797 {
13798 uint32_t u32Disp;
13799 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13800 u64EffAddr += (int32_t)u32Disp;
13801 break;
13802 }
13803 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13804 }
13805
13806 }
13807
13808 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13809 {
13810 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13811 return u64EffAddr;
13812 }
13813 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13814 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13815 return u64EffAddr & UINT32_MAX;
13816}
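
/*
 * Editorial sketch (not part of the original sources): the Jmp variant above
 * does not return a status code; on internal errors it longjmps via
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf), so a hypothetical caller simply uses the
 * returned address.  The bRm value and cbImm=1 are assumptions for the sketch.
 *
 *      RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 1);
 */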
13817#endif /* IEM_WITH_SETJMP */
13818
13819/** @} */
13820
13821
13822
13823/*
13824 * Include the instructions
13825 */
13826#include "IEMAllInstructions.cpp.h"
13827
13828
13829
13830#ifdef LOG_ENABLED
13831/**
13832 * Logs the current instruction.
13833 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13834 * @param fSameCtx Set if we have the same context information as the VMM,
13835 * clear if we may have already executed an instruction in
13836 * our debug context. When clear, we assume IEMCPU holds
13837 * valid CPU mode info.
13838 *
13839 * The @a fSameCtx parameter is now misleading and obsolete.
13840 * @param pszFunction The IEM function doing the execution.
13841 */
13842IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13843{
13844# ifdef IN_RING3
13845 if (LogIs2Enabled())
13846 {
13847 char szInstr[256];
13848 uint32_t cbInstr = 0;
13849 if (fSameCtx)
13850 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13851 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13852 szInstr, sizeof(szInstr), &cbInstr);
13853 else
13854 {
13855 uint32_t fFlags = 0;
13856 switch (pVCpu->iem.s.enmCpuMode)
13857 {
13858 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13859 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13860 case IEMMODE_16BIT:
13861 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13862 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13863 else
13864 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13865 break;
13866 }
13867 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13868 szInstr, sizeof(szInstr), &cbInstr);
13869 }
13870
13871 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13872 Log2(("**** %s\n"
13873 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13874 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13875 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13876 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13877 " %s\n"
13878 , pszFunction,
13879 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13880 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13881 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13882 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13883 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13884 szInstr));
13885
13886 if (LogIs3Enabled())
13887 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13888 }
13889 else
13890# endif
13891 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13892 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13893 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13894}
13895#endif /* LOG_ENABLED */
13896
13897
13898/**
13899 * Makes status code adjustments (pass up from I/O and access handlers)
13900 * as well as maintaining statistics.
13901 *
13902 * @returns Strict VBox status code to pass up.
13903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13904 * @param rcStrict The status from executing an instruction.
13905 */
13906DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13907{
13908 if (rcStrict != VINF_SUCCESS)
13909 {
13910 if (RT_SUCCESS(rcStrict))
13911 {
13912 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13913 || rcStrict == VINF_IOM_R3_IOPORT_READ
13914 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13915 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13916 || rcStrict == VINF_IOM_R3_MMIO_READ
13917 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13918 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13919 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13920 || rcStrict == VINF_CPUM_R3_MSR_READ
13921 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13922 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13923 || rcStrict == VINF_EM_RAW_TO_R3
13924 || rcStrict == VINF_EM_TRIPLE_FAULT
13925 || rcStrict == VINF_GIM_R3_HYPERCALL
13926 /* raw-mode / virt handlers only: */
13927 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13928 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13929 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13930 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13931 || rcStrict == VINF_SELM_SYNC_GDT
13932 || rcStrict == VINF_CSAM_PENDING_ACTION
13933 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13934 /* nested hw.virt codes: */
13935 || rcStrict == VINF_VMX_VMEXIT
13936 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13937 || rcStrict == VINF_SVM_VMEXIT
13938 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13939/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13940 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13941#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13942 if ( rcStrict == VINF_VMX_VMEXIT
13943 && rcPassUp == VINF_SUCCESS)
13944 rcStrict = VINF_SUCCESS;
13945 else
13946#endif
13947#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13948 if ( rcStrict == VINF_SVM_VMEXIT
13949 && rcPassUp == VINF_SUCCESS)
13950 rcStrict = VINF_SUCCESS;
13951 else
13952#endif
13953 if (rcPassUp == VINF_SUCCESS)
13954 pVCpu->iem.s.cRetInfStatuses++;
13955 else if ( rcPassUp < VINF_EM_FIRST
13956 || rcPassUp > VINF_EM_LAST
13957 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13958 {
13959 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13960 pVCpu->iem.s.cRetPassUpStatus++;
13961 rcStrict = rcPassUp;
13962 }
13963 else
13964 {
13965 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13966 pVCpu->iem.s.cRetInfStatuses++;
13967 }
13968 }
13969 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13970 pVCpu->iem.s.cRetAspectNotImplemented++;
13971 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13972 pVCpu->iem.s.cRetInstrNotImplemented++;
13973 else
13974 pVCpu->iem.s.cRetErrStatuses++;
13975 }
13976 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13977 {
13978 pVCpu->iem.s.cRetPassUpStatus++;
13979 rcStrict = pVCpu->iem.s.rcPassUp;
13980 }
13981
13982 return rcStrict;
13983}
13984
13985
13986/**
13987 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13988 * IEMExecOneWithPrefetchedByPC.
13989 *
13990 * Similar code is found in IEMExecLots.
13991 *
13992 * @return Strict VBox status code.
13993 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13994 * @param fExecuteInhibit If set, execute the instruction following CLI,
13995 * POP SS and MOV SS,GR.
13996 * @param pszFunction The calling function name.
13997 */
13998DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13999{
14000 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14001 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14002 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14003 RT_NOREF_PV(pszFunction);
14004
14005#ifdef IEM_WITH_SETJMP
14006 VBOXSTRICTRC rcStrict;
14007 jmp_buf JmpBuf;
14008 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14009 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14010 if ((rcStrict = setjmp(JmpBuf)) == 0)
14011 {
14012 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14013 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14014 }
14015 else
14016 pVCpu->iem.s.cLongJumps++;
14017 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14018#else
14019 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14020 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14021#endif
14022 if (rcStrict == VINF_SUCCESS)
14023 pVCpu->iem.s.cInstructions++;
14024 if (pVCpu->iem.s.cActiveMappings > 0)
14025 {
14026 Assert(rcStrict != VINF_SUCCESS);
14027 iemMemRollback(pVCpu);
14028 }
14029 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14030 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14031 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14032
14033//#ifdef DEBUG
14034// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14035//#endif
14036
14037#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14038 /*
14039 * Perform any VMX nested-guest instruction boundary actions.
14040 *
14041 * If any of these causes a VM-exit, we must skip executing the next
14042 * instruction (would run into stale page tables). A VM-exit makes sure
14043 * there is no interrupt-inhibition, so that should ensure we don't go on
14044 * to try executing the next instruction. Clearing fExecuteInhibit is
14045 * problematic because of the setjmp/longjmp clobbering above.
14046 */
14047 if ( rcStrict == VINF_SUCCESS
14048 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14049 {
14050 bool fCheckRemainingIntercepts = true;
14051 /* TPR-below threshold/APIC write has the highest priority. */
14052 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14053 {
14054 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14055 fCheckRemainingIntercepts = false;
14056 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14057 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14058 }
14059 /* MTF takes priority over VMX-preemption timer. */
14060 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14061 {
14062 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
14063 fCheckRemainingIntercepts = false;
14064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14065 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14066 }
14067 /* VMX preemption timer takes priority over NMI-window exits. */
14068 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14069 {
14070 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14071 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14072 rcStrict = VINF_SUCCESS;
14073 else
14074 {
14075 fCheckRemainingIntercepts = false;
14076 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14077 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14078 }
14079 }
14080
14081 /*
14082 * Check remaining intercepts.
14083 *
14084 * NMI-window and Interrupt-window VM-exits.
14085 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
14086 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
14087 *
14088 * See Intel spec. 26.7.6 "NMI-Window Exiting".
14089 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
14090 */
14091 if ( fCheckRemainingIntercepts
14092 && pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents
14093 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
14094 {
14095 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
14096 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
14097 {
14098 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
14099 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14100 }
14101 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
14102 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
14103 {
14104 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
14105 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
14106 }
14107 }
14108 }
14109#endif
14110
14111 /* Execute the next instruction as well if a cli, pop ss or
14112 mov ss, Gr has just completed successfully. */
14113 if ( fExecuteInhibit
14114 && rcStrict == VINF_SUCCESS
14115 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14116 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14117 {
14118 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14119 if (rcStrict == VINF_SUCCESS)
14120 {
14121#ifdef LOG_ENABLED
14122 iemLogCurInstr(pVCpu, false, pszFunction);
14123#endif
14124#ifdef IEM_WITH_SETJMP
14125 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14126 if ((rcStrict = setjmp(JmpBuf)) == 0)
14127 {
14128 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14129 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14130 }
14131 else
14132 pVCpu->iem.s.cLongJumps++;
14133 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14134#else
14135 IEM_OPCODE_GET_NEXT_U8(&b);
14136 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14137#endif
14138 if (rcStrict == VINF_SUCCESS)
14139 pVCpu->iem.s.cInstructions++;
14140 if (pVCpu->iem.s.cActiveMappings > 0)
14141 {
14142 Assert(rcStrict != VINF_SUCCESS);
14143 iemMemRollback(pVCpu);
14144 }
14145 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14146 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14147 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14148 }
14149 else if (pVCpu->iem.s.cActiveMappings > 0)
14150 iemMemRollback(pVCpu);
14151 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14152 }
14153
14154 /*
14155 * Return value fiddling, statistics and sanity assertions.
14156 */
14157 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14158
14159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14161 return rcStrict;
14162}
14163
14164
14165#ifdef IN_RC
14166/**
14167 * Re-enters raw-mode or ensures we return to ring-3.
14168 *
14169 * @returns rcStrict, maybe modified.
14170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14171 * @param rcStrict The status code returned by the interpreter.
14172 */
14173DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14174{
14175 if ( !pVCpu->iem.s.fInPatchCode
14176 && ( rcStrict == VINF_SUCCESS
14177 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14178 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14179 {
14180 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14181 CPUMRawEnter(pVCpu);
14182 else
14183 {
14184 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14185 rcStrict = VINF_EM_RESCHEDULE;
14186 }
14187 }
14188 return rcStrict;
14189}
14190#endif
14191
14192
14193/**
14194 * Execute one instruction.
14195 *
14196 * @return Strict VBox status code.
14197 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14198 */
14199VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14200{
14201#ifdef LOG_ENABLED
14202 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14203#endif
14204
14205 /*
14206 * Do the decoding and emulation.
14207 */
14208 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14209 if (rcStrict == VINF_SUCCESS)
14210 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14211 else if (pVCpu->iem.s.cActiveMappings > 0)
14212 iemMemRollback(pVCpu);
14213
14214#ifdef IN_RC
14215 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14216#endif
14217 if (rcStrict != VINF_SUCCESS)
14218 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14219 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14220 return rcStrict;
14221}
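
/*
 * Editorial sketch (not part of the original sources): one way an outer loop
 * might drive IEMExecOne.  The loop bound and the blanket treatment of
 * non-VINF_SUCCESS statuses are simplifications; real callers handle the
 * informational statuses (I/O, MMIO, VM-exits, ...) individually.
 *
 *      for (unsigned cLeft = 32; cLeft > 0; cLeft--)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;               // pass everything else up to the caller
 *      }
 */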
14222
14223
14224VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14225{
14226 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14227
14228 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14229 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14230 if (rcStrict == VINF_SUCCESS)
14231 {
14232 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14233 if (pcbWritten)
14234 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14235 }
14236 else if (pVCpu->iem.s.cActiveMappings > 0)
14237 iemMemRollback(pVCpu);
14238
14239#ifdef IN_RC
14240 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14241#endif
14242 return rcStrict;
14243}
14244
14245
14246VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14247 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14248{
14249 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14250
14251 VBOXSTRICTRC rcStrict;
14252 if ( cbOpcodeBytes
14253 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14254 {
14255 iemInitDecoder(pVCpu, false);
14256#ifdef IEM_WITH_CODE_TLB
14257 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14258 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14259 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14260 pVCpu->iem.s.offCurInstrStart = 0;
14261 pVCpu->iem.s.offInstrNextByte = 0;
14262#else
14263 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14264 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14265#endif
14266 rcStrict = VINF_SUCCESS;
14267 }
14268 else
14269 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14270 if (rcStrict == VINF_SUCCESS)
14271 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14272 else if (pVCpu->iem.s.cActiveMappings > 0)
14273 iemMemRollback(pVCpu);
14274
14275#ifdef IN_RC
14276 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14277#endif
14278 return rcStrict;
14279}
14280
14281
14282VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14283{
14284 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14285
14286 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14287 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14288 if (rcStrict == VINF_SUCCESS)
14289 {
14290 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14291 if (pcbWritten)
14292 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14293 }
14294 else if (pVCpu->iem.s.cActiveMappings > 0)
14295 iemMemRollback(pVCpu);
14296
14297#ifdef IN_RC
14298 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14299#endif
14300 return rcStrict;
14301}
14302
14303
14304VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14305 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14306{
14307 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14308
14309 VBOXSTRICTRC rcStrict;
14310 if ( cbOpcodeBytes
14311 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14312 {
14313 iemInitDecoder(pVCpu, true);
14314#ifdef IEM_WITH_CODE_TLB
14315 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14316 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14317 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14318 pVCpu->iem.s.offCurInstrStart = 0;
14319 pVCpu->iem.s.offInstrNextByte = 0;
14320#else
14321 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14322 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14323#endif
14324 rcStrict = VINF_SUCCESS;
14325 }
14326 else
14327 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14328 if (rcStrict == VINF_SUCCESS)
14329 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14330 else if (pVCpu->iem.s.cActiveMappings > 0)
14331 iemMemRollback(pVCpu);
14332
14333#ifdef IN_RC
14334 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14335#endif
14336 return rcStrict;
14337}
14338
14339
14340/**
14341 * May come in handy for debugging DISGetParamSize.
14342 *
14343 * @returns Strict VBox status code.
14344 * @param pVCpu The cross context virtual CPU structure of the
14345 * calling EMT.
14346 * @param pCtxCore The context core structure.
14347 * @param OpcodeBytesPC The PC of the opcode bytes.
14348 * @param pvOpcodeBytes Prefetched opcode bytes.
14349 * @param cbOpcodeBytes Number of prefetched bytes.
14350 * @param pcbWritten Where to return the number of bytes written.
14351 * Optional.
14352 */
14353VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14354 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14355 uint32_t *pcbWritten)
14356{
14357 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14358
14359 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14360 VBOXSTRICTRC rcStrict;
14361 if ( cbOpcodeBytes
14362 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14363 {
14364 iemInitDecoder(pVCpu, true);
14365#ifdef IEM_WITH_CODE_TLB
14366 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14367 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14368 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14369 pVCpu->iem.s.offCurInstrStart = 0;
14370 pVCpu->iem.s.offInstrNextByte = 0;
14371#else
14372 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14373 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14374#endif
14375 rcStrict = VINF_SUCCESS;
14376 }
14377 else
14378 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14379 if (rcStrict == VINF_SUCCESS)
14380 {
14381 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14382 if (pcbWritten)
14383 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14384 }
14385 else if (pVCpu->iem.s.cActiveMappings > 0)
14386 iemMemRollback(pVCpu);
14387
14388#ifdef IN_RC
14389 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14390#endif
14391 return rcStrict;
14392}
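
/*
 * Editorial sketch (not part of the original sources): feeding the
 * prefetched-by-PC variants with opcode bytes the caller already has at hand.
 * The abOpcodes buffer below is hypothetical; as seen above, when the guest
 * RIP does not match OpcodeBytesPC the call falls back to normal prefetching.
 *
 *      uint8_t const abOpcodes[] = { 0x90 };  // hypothetical byte stream (here a NOP)
 *      uint32_t      cbWritten   = 0;
 *      VBOXSTRICTRC  rcStrict = IEMExecOneBypassWithPrefetchedByPCWritten(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                                         pVCpu->cpum.GstCtx.rip, abOpcodes,
 *                                                                         sizeof(abOpcodes), &cbWritten);
 */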
14393
14394
14395VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14396{
14397 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14398 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14399
14400 /*
14401 * See if there is an interrupt pending in TRPM, inject it if we can.
14402 */
14403 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14404#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14405 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14406 if (fIntrEnabled)
14407 {
14408 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14409 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14410 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14411 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14412 else
14413 {
14414 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14415 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14416 }
14417 }
14418#else
14419 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14420#endif
14421
14422 /** @todo What if we are injecting an exception and not an interrupt? Is that
14423 * possible here? */
14424 if ( fIntrEnabled
14425 && TRPMHasTrap(pVCpu)
14426 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14427 {
14428 uint8_t u8TrapNo;
14429 TRPMEVENT enmType;
14430 RTGCUINT uErrCode;
14431 RTGCPTR uCr2;
14432 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14433 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14434 TRPMResetTrap(pVCpu);
14435#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14436 /* Injecting an event may cause a VM-exit. */
14437 if ( rcStrict != VINF_SUCCESS
14438 && rcStrict != VINF_IEM_RAISED_XCPT)
14439 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14440#else
14441 NOREF(rcStrict);
14442#endif
14443 }
14444
14445 /*
14446 * Initial decoder init w/ prefetch, then setup setjmp.
14447 */
14448 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14449 if (rcStrict == VINF_SUCCESS)
14450 {
14451#ifdef IEM_WITH_SETJMP
14452 jmp_buf JmpBuf;
14453 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14454 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14455 pVCpu->iem.s.cActiveMappings = 0;
14456 if ((rcStrict = setjmp(JmpBuf)) == 0)
14457#endif
14458 {
14459 /*
14460 * The run loop. We limit ourselves to the caller specified instruction count (cMaxInstructions).
14461 */
14462 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14463 PVM pVM = pVCpu->CTX_SUFF(pVM);
14464 for (;;)
14465 {
14466 /*
14467 * Log the state.
14468 */
14469#ifdef LOG_ENABLED
14470 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14471#endif
14472
14473 /*
14474 * Do the decoding and emulation.
14475 */
14476 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14477 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14478 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14479 {
14480 Assert(pVCpu->iem.s.cActiveMappings == 0);
14481 pVCpu->iem.s.cInstructions++;
14482 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14483 {
14484 uint64_t fCpu = pVCpu->fLocalForcedActions
14485 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14486 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14487 | VMCPU_FF_TLB_FLUSH
14488#ifdef VBOX_WITH_RAW_MODE
14489 | VMCPU_FF_TRPM_SYNC_IDT
14490 | VMCPU_FF_SELM_SYNC_TSS
14491 | VMCPU_FF_SELM_SYNC_GDT
14492 | VMCPU_FF_SELM_SYNC_LDT
14493#endif
14494 | VMCPU_FF_INHIBIT_INTERRUPTS
14495 | VMCPU_FF_BLOCK_NMIS
14496 | VMCPU_FF_UNHALT ));
14497
14498 if (RT_LIKELY( ( !fCpu
14499 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14500 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14501 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14502 {
14503 if (cMaxInstructionsGccStupidity-- > 0)
14504 {
14505 /* Poll timers every now and then according to the caller's specs. */
14506 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14507 || !TMTimerPollBool(pVM, pVCpu))
14508 {
14509 Assert(pVCpu->iem.s.cActiveMappings == 0);
14510 iemReInitDecoder(pVCpu);
14511 continue;
14512 }
14513 }
14514 }
14515 }
14516 Assert(pVCpu->iem.s.cActiveMappings == 0);
14517 }
14518 else if (pVCpu->iem.s.cActiveMappings > 0)
14519 iemMemRollback(pVCpu);
14520 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14521 break;
14522 }
14523 }
14524#ifdef IEM_WITH_SETJMP
14525 else
14526 {
14527 if (pVCpu->iem.s.cActiveMappings > 0)
14528 iemMemRollback(pVCpu);
14529# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14531# endif
14532 pVCpu->iem.s.cLongJumps++;
14533 }
14534 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14535#endif
14536
14537 /*
14538 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14539 */
14540 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14541 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14542 }
14543 else
14544 {
14545 if (pVCpu->iem.s.cActiveMappings > 0)
14546 iemMemRollback(pVCpu);
14547
14548#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14549 /*
14550 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14551 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14552 */
14553 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14554#endif
14555 }
14556
14557 /*
14558 * Maybe re-enter raw-mode and log.
14559 */
14560#ifdef IN_RC
14561 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14562#endif
14563 if (rcStrict != VINF_SUCCESS)
14564 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14565 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14566 if (pcInstructions)
14567 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14568 return rcStrict;
14569}
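
/*
 * Editorial sketch (not part of the original sources): a minimal IEMExecLots
 * call.  As the assertion at the top of the function requires, cPollRate + 1
 * must be a power of two because cPollRate is used as a mask when deciding
 * how often to poll the timers.  The budget of 4096 and poll rate of 511 are
 * hypothetical values.
 *
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 *      LogFlow(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */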
14570
14571
14572/**
14573 * Interface used by EMExecuteExec; does exit statistics and limits.
14574 *
14575 * @returns Strict VBox status code.
14576 * @param pVCpu The cross context virtual CPU structure.
14577 * @param fWillExit To be defined.
14578 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14579 * @param cMaxInstructions Maximum number of instructions to execute.
14580 * @param cMaxInstructionsWithoutExits
14581 * The max number of instructions without exits.
14582 * @param pStats Where to return statistics.
14583 */
14584VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14585 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14586{
14587 NOREF(fWillExit); /** @todo define flexible exit crits */
14588
14589 /*
14590 * Initialize return stats.
14591 */
14592 pStats->cInstructions = 0;
14593 pStats->cExits = 0;
14594 pStats->cMaxExitDistance = 0;
14595 pStats->cReserved = 0;
14596
14597 /*
14598 * Initial decoder init w/ prefetch, then setup setjmp.
14599 */
14600 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14601 if (rcStrict == VINF_SUCCESS)
14602 {
14603#ifdef IEM_WITH_SETJMP
14604 jmp_buf JmpBuf;
14605 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14606 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14607 pVCpu->iem.s.cActiveMappings = 0;
14608 if ((rcStrict = setjmp(JmpBuf)) == 0)
14609#endif
14610 {
14611#ifdef IN_RING0
14612 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14613#endif
14614 uint32_t cInstructionSinceLastExit = 0;
14615
14616 /*
14617 * The run loop. We limit ourselves to the caller specified instruction limits.
14618 */
14619 PVM pVM = pVCpu->CTX_SUFF(pVM);
14620 for (;;)
14621 {
14622 /*
14623 * Log the state.
14624 */
14625#ifdef LOG_ENABLED
14626 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14627#endif
14628
14629 /*
14630 * Do the decoding and emulation.
14631 */
14632 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14633
14634 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14635 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14636
14637 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14638 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14639 {
14640 pStats->cExits += 1;
14641 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14642 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14643 cInstructionSinceLastExit = 0;
14644 }
14645
14646 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14647 {
14648 Assert(pVCpu->iem.s.cActiveMappings == 0);
14649 pVCpu->iem.s.cInstructions++;
14650 pStats->cInstructions++;
14651 cInstructionSinceLastExit++;
14652 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14653 {
14654 uint64_t fCpu = pVCpu->fLocalForcedActions
14655 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14656 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14657 | VMCPU_FF_TLB_FLUSH
14658#ifdef VBOX_WITH_RAW_MODE
14659 | VMCPU_FF_TRPM_SYNC_IDT
14660 | VMCPU_FF_SELM_SYNC_TSS
14661 | VMCPU_FF_SELM_SYNC_GDT
14662 | VMCPU_FF_SELM_SYNC_LDT
14663#endif
14664 | VMCPU_FF_INHIBIT_INTERRUPTS
14665 | VMCPU_FF_BLOCK_NMIS
14666 | VMCPU_FF_UNHALT ));
14667
14668 if (RT_LIKELY( ( ( !fCpu
14669 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14670 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14671 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14672 || pStats->cInstructions < cMinInstructions))
14673 {
14674 if (pStats->cInstructions < cMaxInstructions)
14675 {
14676 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14677 {
14678#ifdef IN_RING0
14679 if ( !fCheckPreemptionPending
14680 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14681#endif
14682 {
14683 Assert(pVCpu->iem.s.cActiveMappings == 0);
14684 iemReInitDecoder(pVCpu);
14685 continue;
14686 }
14687#ifdef IN_RING0
14688 rcStrict = VINF_EM_RAW_INTERRUPT;
14689 break;
14690#endif
14691 }
14692 }
14693 }
14694 Assert(!(fCpu & VMCPU_FF_IEM));
14695 }
14696 Assert(pVCpu->iem.s.cActiveMappings == 0);
14697 }
14698 else if (pVCpu->iem.s.cActiveMappings > 0)
14699 iemMemRollback(pVCpu);
14700 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14701 break;
14702 }
14703 }
14704#ifdef IEM_WITH_SETJMP
14705 else
14706 {
14707 if (pVCpu->iem.s.cActiveMappings > 0)
14708 iemMemRollback(pVCpu);
14709 pVCpu->iem.s.cLongJumps++;
14710 }
14711 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14712#endif
14713
14714 /*
14715 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14716 */
14717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14719 }
14720 else
14721 {
14722 if (pVCpu->iem.s.cActiveMappings > 0)
14723 iemMemRollback(pVCpu);
14724
14725#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14726 /*
14727 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14728 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14729 */
14730 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14731#endif
14732 }
14733
14734 /*
14735 * Maybe re-enter raw-mode and log.
14736 */
14737#ifdef IN_RC
14738 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14739#endif
14740 if (rcStrict != VINF_SUCCESS)
14741 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14743 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14744 return rcStrict;
14745}
14746
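/*
 * A minimal usage sketch for the interface above: how a caller in EM might
 * drive IEMExecForExits and consume the returned statistics.  The wrapper
 * function name and the limit values are hypothetical; only the
 * IEMExecForExits call itself reflects the interface defined above.
 */
#if 0
static VBOXSTRICTRC emSampleExecWithIemForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 64 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    LogFlow(("emSampleExecWithIemForExits: rc=%Rrc ins=%u exits=%u maxdist=%u\n",
             VBOXSTRICTRC_VAL(rcStrict), Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif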
14747
14748/**
14749 * Injects a trap, fault, abort, software interrupt or external interrupt.
14750 *
14751 * The parameter list matches TRPMQueryTrapAll pretty closely.
14752 *
14753 * @returns Strict VBox status code.
14754 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14755 * @param u8TrapNo The trap number.
14756 * @param enmType What type is it (trap/fault/abort), software
14757 * interrupt or hardware interrupt.
14758 * @param uErrCode The error code if applicable.
14759 * @param uCr2 The CR2 value if applicable.
14760 * @param cbInstr The instruction length (only relevant for
14761 * software interrupts).
14762 */
14763VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14764 uint8_t cbInstr)
14765{
14766 iemInitDecoder(pVCpu, false);
14767#ifdef DBGFTRACE_ENABLED
14768 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14769 u8TrapNo, enmType, uErrCode, uCr2);
14770#endif
14771
14772 uint32_t fFlags;
14773 switch (enmType)
14774 {
14775 case TRPM_HARDWARE_INT:
14776 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14777 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14778 uErrCode = uCr2 = 0;
14779 break;
14780
14781 case TRPM_SOFTWARE_INT:
14782 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14783 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14784 uErrCode = uCr2 = 0;
14785 break;
14786
14787 case TRPM_TRAP:
14788 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14789 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14790 if (u8TrapNo == X86_XCPT_PF)
14791 fFlags |= IEM_XCPT_FLAGS_CR2;
14792 switch (u8TrapNo)
14793 {
14794 case X86_XCPT_DF:
14795 case X86_XCPT_TS:
14796 case X86_XCPT_NP:
14797 case X86_XCPT_SS:
14798 case X86_XCPT_PF:
14799 case X86_XCPT_AC:
14800 fFlags |= IEM_XCPT_FLAGS_ERR;
14801 break;
14802 }
14803 break;
14804
14805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14806 }
14807
14808 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14809
14810 if (pVCpu->iem.s.cActiveMappings > 0)
14811 iemMemRollback(pVCpu);
14812
14813 return rcStrict;
14814}
14815
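/*
 * A minimal sketch of injecting a page fault through the interface above.
 * The faulting address and error code are made-up example values; a real
 * caller takes them from whatever detected the fault.
 */
#if 0
static VBOXSTRICTRC SampleInjectPageFault(PVMCPU pVCpu)
{
    RTGCPTR const  GCPtrFault = UINT32_C(0x00401000);   /* hypothetical faulting address, ends up in CR2 */
    uint16_t const uErrCode   = X86_TRAP_PF_RW;         /* write access to a not-present page */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif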
14816
14817/**
14818 * Injects the active TRPM event.
14819 *
14820 * @returns Strict VBox status code.
14821 * @param pVCpu The cross context virtual CPU structure.
14822 */
14823VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14824{
14825#ifndef IEM_IMPLEMENTS_TASKSWITCH
14826 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14827#else
14828 uint8_t u8TrapNo;
14829 TRPMEVENT enmType;
14830 RTGCUINT uErrCode;
14831 RTGCUINTPTR uCr2;
14832 uint8_t cbInstr;
14833 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14834 if (RT_FAILURE(rc))
14835 return rc;
14836
14837 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14838#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14839 if (rcStrict == VINF_SVM_VMEXIT)
14840 rcStrict = VINF_SUCCESS;
14841#endif
14842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14843 if (rcStrict == VINF_VMX_VMEXIT)
14844 rcStrict = VINF_SUCCESS;
14845#endif
14846 /** @todo Are there any other codes that imply the event was successfully
14847 * delivered to the guest? See @bugref{6607}. */
14848 if ( rcStrict == VINF_SUCCESS
14849 || rcStrict == VINF_IEM_RAISED_XCPT)
14850 TRPMResetTrap(pVCpu);
14851
14852 return rcStrict;
14853#endif
14854}
14855
14856
14857VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14858{
14859 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14860 return VERR_NOT_IMPLEMENTED;
14861}
14862
14863
14864VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14865{
14866 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14867 return VERR_NOT_IMPLEMENTED;
14868}
14869
14870
14871#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14872/**
14873 * Executes an IRET instruction with default operand size.
14874 *
14875 * This is for PATM.
14876 *
14877 * @returns VBox status code.
14878 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14879 * @param pCtxCore The register frame.
14880 */
14881VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14882{
14883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14884
14885 iemCtxCoreToCtx(pCtx, pCtxCore);
14886 iemInitDecoder(pVCpu);
14887 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14888 if (rcStrict == VINF_SUCCESS)
14889 iemCtxToCtxCore(pCtxCore, pCtx);
14890 else
14891 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14892 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14893 return rcStrict;
14894}
14895#endif
14896
14897
14898/**
14899 * Macro used by the IEMExec* methods to check the given instruction length.
14900 *
14901 * Will return on failure!
14902 *
14903 * @param a_cbInstr The given instruction length.
14904 * @param a_cbMin The minimum length.
14905 */
14906#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14907 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14908 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14909
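/*
 * Note that the single unsigned comparison in the macro above folds both bounds
 * checks (a_cbMin <= a_cbInstr <= 15) into one: the difference wraps around to
 * a huge value when a_cbInstr is below a_cbMin, and it exceeds 15 - a_cbMin
 * when a_cbInstr is above 15.  For example, with a_cbMin=2 the limit is 13;
 * cbInstr=1 yields 0xffffffff and cbInstr=16 yields 14, both failing, while
 * cbInstr=2..15 yields 0..13 and passes.
 */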
14910
14911/**
14912 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14913 *
14914 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14915 *
14916 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14918 * @param rcStrict The status code to fiddle.
14919 */
14920DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14921{
14922 iemUninitExec(pVCpu);
14923#ifdef IN_RC
14924 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14925#else
14926 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14927#endif
14928}
14929
14930
14931/**
14932 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14933 *
14934 * This API ASSUMES that the caller has already verified that the guest code is
14935 * allowed to access the I/O port. (The I/O port is in the DX register in the
14936 * guest state.)
14937 *
14938 * @returns Strict VBox status code.
14939 * @param pVCpu The cross context virtual CPU structure.
14940 * @param cbValue The size of the I/O port access (1, 2, or 4).
14941 * @param enmAddrMode The addressing mode.
14942 * @param fRepPrefix Indicates whether a repeat prefix is used
14943 * (doesn't matter which for this instruction).
14944 * @param cbInstr The instruction length in bytes.
14945 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
14946 * @param fIoChecked Whether the access to the I/O port has been
14947 * checked or not. It's typically checked in the
14948 * HM scenario.
14949 */
14950VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14951 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14952{
14953 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14954 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14955
14956 /*
14957 * State init.
14958 */
14959 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14960
14961 /*
14962 * Switch orgy for getting to the right handler.
14963 */
14964 VBOXSTRICTRC rcStrict;
14965 if (fRepPrefix)
14966 {
14967 switch (enmAddrMode)
14968 {
14969 case IEMMODE_16BIT:
14970 switch (cbValue)
14971 {
14972 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14973 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14975 default:
14976 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14977 }
14978 break;
14979
14980 case IEMMODE_32BIT:
14981 switch (cbValue)
14982 {
14983 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14984 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14985 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14986 default:
14987 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14988 }
14989 break;
14990
14991 case IEMMODE_64BIT:
14992 switch (cbValue)
14993 {
14994 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14995 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14996 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14997 default:
14998 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14999 }
15000 break;
15001
15002 default:
15003 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15004 }
15005 }
15006 else
15007 {
15008 switch (enmAddrMode)
15009 {
15010 case IEMMODE_16BIT:
15011 switch (cbValue)
15012 {
15013 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15014 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15015 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15016 default:
15017 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15018 }
15019 break;
15020
15021 case IEMMODE_32BIT:
15022 switch (cbValue)
15023 {
15024 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15025 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15026 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15027 default:
15028 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15029 }
15030 break;
15031
15032 case IEMMODE_64BIT:
15033 switch (cbValue)
15034 {
15035 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15036 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15037 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15038 default:
15039 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15040 }
15041 break;
15042
15043 default:
15044 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15045 }
15046 }
15047
15048 if (pVCpu->iem.s.cActiveMappings)
15049 iemMemRollback(pVCpu);
15050
15051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15052}
15053
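/*
 * A minimal sketch of handing a decoded REP OUTSB off to the interface above,
 * e.g. from an HM I/O exit handler.  The operand size, addressing mode and
 * segment below are example values; a real caller decodes them from the exit
 * information.
 */
#if 0
static VBOXSTRICTRC SampleEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif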
15054
15055/**
15056 * Interface for HM and EM for executing string I/O IN (read) instructions.
15057 *
15058 * This API ASSUMES that the caller has already verified that the guest code is
15059 * allowed to access the I/O port. (The I/O port is in the DX register in the
15060 * guest state.)
15061 *
15062 * @returns Strict VBox status code.
15063 * @param pVCpu The cross context virtual CPU structure.
15064 * @param cbValue The size of the I/O port access (1, 2, or 4).
15065 * @param enmAddrMode The addressing mode.
15066 * @param fRepPrefix Indicates whether a repeat prefix is used
15067 * (doesn't matter which for this instruction).
15068 * @param cbInstr The instruction length in bytes.
15069 * @param fIoChecked Whether the access to the I/O port has been
15070 * checked or not. It's typically checked in the
15071 * HM scenario.
15072 */
15073VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15074 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15077
15078 /*
15079 * State init.
15080 */
15081 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15082
15083 /*
15084 * Switch orgy for getting to the right handler.
15085 */
15086 VBOXSTRICTRC rcStrict;
15087 if (fRepPrefix)
15088 {
15089 switch (enmAddrMode)
15090 {
15091 case IEMMODE_16BIT:
15092 switch (cbValue)
15093 {
15094 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15095 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15096 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15097 default:
15098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15099 }
15100 break;
15101
15102 case IEMMODE_32BIT:
15103 switch (cbValue)
15104 {
15105 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15106 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15107 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15108 default:
15109 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15110 }
15111 break;
15112
15113 case IEMMODE_64BIT:
15114 switch (cbValue)
15115 {
15116 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15117 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15118 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15119 default:
15120 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15121 }
15122 break;
15123
15124 default:
15125 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15126 }
15127 }
15128 else
15129 {
15130 switch (enmAddrMode)
15131 {
15132 case IEMMODE_16BIT:
15133 switch (cbValue)
15134 {
15135 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15136 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15137 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15138 default:
15139 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15140 }
15141 break;
15142
15143 case IEMMODE_32BIT:
15144 switch (cbValue)
15145 {
15146 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15147 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15148 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15149 default:
15150 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15151 }
15152 break;
15153
15154 case IEMMODE_64BIT:
15155 switch (cbValue)
15156 {
15157 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15158 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15159 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15160 default:
15161 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15162 }
15163 break;
15164
15165 default:
15166 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15167 }
15168 }
15169
15170 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15171 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15172}
15173
15174
15175/**
15176 * Interface for raw-mode to execute an OUT (write) instruction.
15177 *
15178 * @returns Strict VBox status code.
15179 * @param pVCpu The cross context virtual CPU structure.
15180 * @param cbInstr The instruction length in bytes.
15181 * @param u16Port The port to write to.
15182 * @param fImm Whether the port is specified using an immediate operand or
15183 * using the implicit DX register.
15184 * @param cbReg The register size.
15185 *
15186 * @remarks In ring-0 not all of the state needs to be synced in.
15187 */
15188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15189{
15190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15191 Assert(cbReg <= 4 && cbReg != 3);
15192
15193 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15194 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15195 Assert(!pVCpu->iem.s.cActiveMappings);
15196 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15197}
15198
15199
15200/**
15201 * Interface for raw-mode to execute an IN (read) instruction.
15202 *
15203 * @returns Strict VBox status code.
15204 * @param pVCpu The cross context virtual CPU structure.
15205 * @param cbInstr The instruction length in bytes.
15206 * @param u16Port The port to read.
15207 * @param fImm Whether the port is specified using an immediate operand or
15208 * using the implicit DX.
15209 * @param cbReg The register size.
15210 */
15211VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15212{
15213 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15214 Assert(cbReg <= 4 && cbReg != 3);
15215
15216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15218 Assert(!pVCpu->iem.s.cActiveMappings);
15219 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15220}
15221
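/*
 * A minimal sketch of using the two decoded port I/O helpers above.  The port
 * number and access size are example values; a real caller takes them from the
 * decoded instruction or the VM-exit information.
 */
#if 0
static VBOXSTRICTRC SampleEmulatePortIo(PVMCPU pVCpu, uint8_t cbInstr, bool fWrite)
{
    uint16_t const u16Port = 0x64;  /* hypothetical port */
    uint8_t const  cbReg   = 1;     /* byte access */
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, true /*fImm*/, cbReg);
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, true /*fImm*/, cbReg);
}
#endif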
15222
15223/**
15224 * Interface for HM and EM to write to a CRx register.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure.
15228 * @param cbInstr The instruction length in bytes.
15229 * @param iCrReg The control register number (destination).
15230 * @param iGReg The general purpose register number (source).
15231 *
15232 * @remarks In ring-0 not all of the state needs to be synced in.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15237 Assert(iCrReg < 16);
15238 Assert(iGReg < 16);
15239
15240 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15241 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15242 Assert(!pVCpu->iem.s.cActiveMappings);
15243 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15244}
15245
15246
15247/**
15248 * Interface for HM and EM to read from a CRx register.
15249 *
15250 * @returns Strict VBox status code.
15251 * @param pVCpu The cross context virtual CPU structure.
15252 * @param cbInstr The instruction length in bytes.
15253 * @param iGReg The general purpose register number (destination).
15254 * @param iCrReg The control register number (source).
15255 *
15256 * @remarks In ring-0 not all of the state needs to be synced in.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15262 | CPUMCTX_EXTRN_APIC_TPR);
15263 Assert(iCrReg < 16);
15264 Assert(iGReg < 16);
15265
15266 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15268 Assert(!pVCpu->iem.s.cActiveMappings);
15269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15270}
15271
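/*
 * A minimal sketch of forwarding a decoded MOV CRx access to the two helpers
 * above.  The register indices are example values; a real caller decodes them
 * from the exit qualification.
 */
#if 0
static VBOXSTRICTRC SampleEmulateMovCr3(PVMCPU pVCpu, uint8_t cbInstr, bool fWrite)
{
    uint8_t const iCrReg = 3;               /* CR3 */
    uint8_t const iGReg  = X86_GREG_xAX;    /* general purpose register operand */
    if (fWrite)
        return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
    return IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
}
#endif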
15272
15273/**
15274 * Interface for HM and EM to clear the CR0[TS] bit.
15275 *
15276 * @returns Strict VBox status code.
15277 * @param pVCpu The cross context virtual CPU structure.
15278 * @param cbInstr The instruction length in bytes.
15279 *
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15285
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15288 Assert(!pVCpu->iem.s.cActiveMappings);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15295 *
15296 * @returns Strict VBox status code.
15297 * @param pVCpu The cross context virtual CPU structure.
15298 * @param cbInstr The instruction length in bytes.
15299 * @param uValue The value to load into CR0.
15300 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15301 * memory operand. Otherwise pass NIL_RTGCPTR.
15302 *
15303 * @remarks In ring-0 not all of the state needs to be synced in.
15304 */
15305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15306{
15307 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15308
15309 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15310 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15311 Assert(!pVCpu->iem.s.cActiveMappings);
15312 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15313}
15314
15315
15316/**
15317 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15318 *
15319 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15320 *
15321 * @returns Strict VBox status code.
15322 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15323 * @param cbInstr The instruction length in bytes.
15324 * @remarks In ring-0 not all of the state needs to be synced in.
15325 * @thread EMT(pVCpu)
15326 */
15327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15328{
15329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15330
15331 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15332 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15333 Assert(!pVCpu->iem.s.cActiveMappings);
15334 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15335}
15336
15337
15338/**
15339 * Interface for HM and EM to emulate the WBINVD instruction.
15340 *
15341 * @returns Strict VBox status code.
15342 * @param pVCpu The cross context virtual CPU structure.
15343 * @param cbInstr The instruction length in bytes.
15344 *
15345 * @remarks In ring-0 not all of the state needs to be synced in.
15346 */
15347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15348{
15349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15350
15351 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15352 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15353 Assert(!pVCpu->iem.s.cActiveMappings);
15354 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15355}
15356
15357
15358/**
15359 * Interface for HM and EM to emulate the INVD instruction.
15360 *
15361 * @returns Strict VBox status code.
15362 * @param pVCpu The cross context virtual CPU structure.
15363 * @param cbInstr The instruction length in bytes.
15364 *
15365 * @remarks In ring-0 not all of the state needs to be synced in.
15366 */
15367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15368{
15369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15370
15371 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15372 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15373 Assert(!pVCpu->iem.s.cActiveMappings);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the INVLPG instruction.
15380 *
15381 * @returns Strict VBox status code.
15382 * @retval VINF_PGM_SYNC_CR3
15383 *
15384 * @param pVCpu The cross context virtual CPU structure.
15385 * @param cbInstr The instruction length in bytes.
15386 * @param GCPtrPage The effective address of the page to invalidate.
15387 *
15388 * @remarks In ring-0 not all of the state needs to be synced in.
15389 */
15390VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15391{
15392 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15393
15394 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15395 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15396 Assert(!pVCpu->iem.s.cActiveMappings);
15397 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15398}
15399
15400
15401/**
15402 * Interface for HM and EM to emulate the CPUID instruction.
15403 *
15404 * @returns Strict VBox status code.
15405 *
15406 * @param pVCpu The cross context virtual CPU structure.
15407 * @param cbInstr The instruction length in bytes.
15408 *
15409 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15414 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15415
15416 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15417 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15418 Assert(!pVCpu->iem.s.cActiveMappings);
15419 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15420}
15421
15422
15423/**
15424 * Interface for HM and EM to emulate the RDPMC instruction.
15425 *
15426 * @returns Strict VBox status code.
15427 *
15428 * @param pVCpu The cross context virtual CPU structure.
15429 * @param cbInstr The instruction length in bytes.
15430 *
15431 * @remarks Not all of the state needs to be synced in.
15432 */
15433VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15434{
15435 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15436 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the RDTSC instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15450 *
15451 * @param pVCpu The cross context virtual CPU structure.
15452 * @param cbInstr The instruction length in bytes.
15453 *
15454 * @remarks Not all of the state needs to be synced in.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15459 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15460
15461 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15463 Assert(!pVCpu->iem.s.cActiveMappings);
15464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15465}
15466
15467
15468/**
15469 * Interface for HM and EM to emulate the RDTSCP instruction.
15470 *
15471 * @returns Strict VBox status code.
15472 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15473 *
15474 * @param pVCpu The cross context virtual CPU structure.
15475 * @param cbInstr The instruction length in bytes.
15476 *
15477 * @remarks Not all of the state needs to be synced in. Recommended
15478 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15479 */
15480VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15481{
15482 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15483 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15484
15485 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15487 Assert(!pVCpu->iem.s.cActiveMappings);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the RDMSR instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15497 *
15498 * @param pVCpu The cross context virtual CPU structure.
15499 * @param cbInstr The instruction length in bytes.
15500 *
15501 * @remarks Not all of the state needs to be synced in. Requires RCX and
15502 * (currently) all MSRs.
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15508
15509 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15510 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15511 Assert(!pVCpu->iem.s.cActiveMappings);
15512 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15513}
15514
15515
15516/**
15517 * Interface for HM and EM to emulate the WRMSR instruction.
15518 *
15519 * @returns Strict VBox status code.
15520 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15521 *
15522 * @param pVCpu The cross context virtual CPU structure.
15523 * @param cbInstr The instruction length in bytes.
15524 *
15525 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15526 * and (currently) all MSRs.
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15529{
15530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15531 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15532 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15533
15534 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15535 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15536 Assert(!pVCpu->iem.s.cActiveMappings);
15537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15538}
15539
15540
15541/**
15542 * Interface for HM and EM to emulate the MONITOR instruction.
15543 *
15544 * @returns Strict VBox status code.
15545 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15546 *
15547 * @param pVCpu The cross context virtual CPU structure.
15548 * @param cbInstr The instruction length in bytes.
15549 *
15550 * @remarks Not all of the state needs to be synced in.
15551 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15552 * are used.
15553 */
15554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15555{
15556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15557 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15558
15559 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15560 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15561 Assert(!pVCpu->iem.s.cActiveMappings);
15562 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15563}
15564
15565
15566/**
15567 * Interface for HM and EM to emulate the MWAIT instruction.
15568 *
15569 * @returns Strict VBox status code.
15570 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15571 *
15572 * @param pVCpu The cross context virtual CPU structure.
15573 * @param cbInstr The instruction length in bytes.
15574 *
15575 * @remarks Not all of the state needs to be synced in.
15576 */
15577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15578{
15579 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15580 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15581
15582 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15584 Assert(!pVCpu->iem.s.cActiveMappings);
15585 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15586}
15587
15588
15589/**
15590 * Interface for HM and EM to emulate the HLT instruction.
15591 *
15592 * @returns Strict VBox status code.
15593 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15594 *
15595 * @param pVCpu The cross context virtual CPU structure.
15596 * @param cbInstr The instruction length in bytes.
15597 *
15598 * @remarks Not all of the state needs to be synced in.
15599 */
15600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15601{
15602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15603
15604 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15606 Assert(!pVCpu->iem.s.cActiveMappings);
15607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15608}
15609
15610
15611/**
15612 * Checks if IEM is in the process of delivering an event (interrupt or
15613 * exception).
15614 *
15615 * @returns true if we're in the process of raising an interrupt or exception,
15616 * false otherwise.
15617 * @param pVCpu The cross context virtual CPU structure.
15618 * @param puVector Where to store the vector associated with the
15619 * currently delivered event, optional.
15620 * @param pfFlags Where to store the event delivery flags (see
15621 * IEM_XCPT_FLAGS_XXX), optional.
15622 * @param puErr Where to store the error code associated with the
15623 * event, optional.
15624 * @param puCr2 Where to store the CR2 associated with the event,
15625 * optional.
15626 * @remarks The caller should check the flags to determine if the error code and
15627 * CR2 are valid for the event.
15628 */
15629VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15630{
15631 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15632 if (fRaisingXcpt)
15633 {
15634 if (puVector)
15635 *puVector = pVCpu->iem.s.uCurXcpt;
15636 if (pfFlags)
15637 *pfFlags = pVCpu->iem.s.fCurXcpt;
15638 if (puErr)
15639 *puErr = pVCpu->iem.s.uCurXcptErr;
15640 if (puCr2)
15641 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15642 }
15643 return fRaisingXcpt;
15644}
15645
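/*
 * A minimal sketch of querying the event currently being delivered, e.g. when
 * building up exit interruption information.  All output parameters are
 * optional; pass NULL for the ones that are not needed.
 */
#if 0
static void SampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x: fFlags=%#x uErr=%#x uCr2=%#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif
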
15646#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15647
15648/**
15649 * Interface for HM and EM to emulate the CLGI instruction.
15650 *
15651 * @returns Strict VBox status code.
15652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15653 * @param cbInstr The instruction length in bytes.
15654 * @thread EMT(pVCpu)
15655 */
15656VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15657{
15658 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15659
15660 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15661 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15662 Assert(!pVCpu->iem.s.cActiveMappings);
15663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15664}
15665
15666
15667/**
15668 * Interface for HM and EM to emulate the STGI instruction.
15669 *
15670 * @returns Strict VBox status code.
15671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15672 * @param cbInstr The instruction length in bytes.
15673 * @thread EMT(pVCpu)
15674 */
15675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15676{
15677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15678
15679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15681 Assert(!pVCpu->iem.s.cActiveMappings);
15682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15683}
15684
15685
15686/**
15687 * Interface for HM and EM to emulate the VMLOAD instruction.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param cbInstr The instruction length in bytes.
15692 * @thread EMT(pVCpu)
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15695{
15696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15697
15698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15699 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15700 Assert(!pVCpu->iem.s.cActiveMappings);
15701 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15702}
15703
15704
15705/**
15706 * Interface for HM and EM to emulate the VMSAVE instruction.
15707 *
15708 * @returns Strict VBox status code.
15709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15710 * @param cbInstr The instruction length in bytes.
15711 * @thread EMT(pVCpu)
15712 */
15713VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15714{
15715 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15716
15717 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15719 Assert(!pVCpu->iem.s.cActiveMappings);
15720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15721}
15722
15723
15724/**
15725 * Interface for HM and EM to emulate the INVLPGA instruction.
15726 *
15727 * @returns Strict VBox status code.
15728 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15729 * @param cbInstr The instruction length in bytes.
15730 * @thread EMT(pVCpu)
15731 */
15732VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15733{
15734 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15735
15736 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15737 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15738 Assert(!pVCpu->iem.s.cActiveMappings);
15739 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15740}
15741
15742
15743/**
15744 * Interface for HM and EM to emulate the VMRUN instruction.
15745 *
15746 * @returns Strict VBox status code.
15747 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15748 * @param cbInstr The instruction length in bytes.
15749 * @thread EMT(pVCpu)
15750 */
15751VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15752{
15753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15755
15756 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15758 Assert(!pVCpu->iem.s.cActiveMappings);
15759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15760}
15761
15762
15763/**
15764 * Interface for HM and EM to emulate \#VMEXIT.
15765 *
15766 * @returns Strict VBox status code.
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @param uExitCode The exit code.
15769 * @param uExitInfo1 The exit info. 1 field.
15770 * @param uExitInfo2 The exit info. 2 field.
15771 * @thread EMT(pVCpu)
15772 */
15773VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15774{
15775 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15776 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15777 if (pVCpu->iem.s.cActiveMappings)
15778 iemMemRollback(pVCpu);
15779 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15780}
15781
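/*
 * A minimal sketch of triggering a nested-guest #VMEXIT through the interface
 * above.  The exit code and the two exit info fields come from whatever
 * intercept the caller detected; the VINF_SVM_VMEXIT handling mirrors what
 * IEMInjectTrpmEvent does further up in this file.
 */
#if 0
static VBOXSTRICTRC SampleSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    if (rcStrict == VINF_SVM_VMEXIT)    /* the world switch back to the outer guest has been done */
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}
#endif
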
15782#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15783
15784#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15785
15786/**
15787 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15788 *
15789 * @returns Strict VBox status code.
15790 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15791 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15792 * the x2APIC device.
15793 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15794 *
15795 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15796 * @param idMsr The MSR being read.
15797 * @param pu64Value Pointer to the value being written or where to store the
15798 * value being read.
15799 * @param fWrite Whether this is an MSR write or read access.
15800 * @thread EMT(pVCpu)
15801 */
15802VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15803{
15804 Assert(pu64Value);
15805
15806 VBOXSTRICTRC rcStrict;
15807 if (fWrite)
15808 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15809 else
15810 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15811 Assert(!pVCpu->iem.s.cActiveMappings);
15812 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15814}
15815
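/*
 * A minimal sketch of virtualizing an x2APIC MSR read through the interface
 * above.  The MSR index used here (0x808, the x2APIC TPR) is an example value;
 * a real caller passes whatever MSR the guest accessed.
 */
#if 0
static VBOXSTRICTRC SampleVirtApicTprRead(PVMCPU pVCpu, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, 0x808 /*idMsr*/, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
    { /* Not virtualized: the caller must forward the access to the x2APIC device. */ }
    return rcStrict;
}
#endif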
15816
15817/**
15818 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15819 *
15820 * @returns Strict VBox status code.
15821 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15822 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15823 *
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @param pExitInfo Pointer to the VM-exit information.
15826 * @param pExitEventInfo Pointer to the VM-exit event information.
15827 * @thread EMT(pVCpu)
15828 */
15829VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15830{
15831 Assert(pExitInfo);
15832 Assert(pExitEventInfo);
15833 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15834 Assert(!pVCpu->iem.s.cActiveMappings);
15835 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15837}
15838
15839
15840/**
15841 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15842 * VM-exit.
15843 *
15844 * @returns Strict VBox status code.
15845 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15846 * @thread EMT(pVCpu)
15847 */
15848VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15849{
15850 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15851 Assert(!pVCpu->iem.s.cActiveMappings);
15852 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15853}
15854
15855
15856/**
15857 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15858 *
15859 * @returns Strict VBox status code.
15860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15861 * @thread EMT(pVCpu)
15862 */
15863VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15864{
15865 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15866 Assert(!pVCpu->iem.s.cActiveMappings);
15867 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15868}
15869
15870
15871/**
15872 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15873 *
15874 * @returns Strict VBox status code.
15875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15876 * @param uVector The external interrupt vector (pass 0 if the external
15877 * interrupt is still pending).
15878 * @param fIntPending Whether the external interrupt is pending or
15879 * acknowledged in the interrupt controller.
15880 * @thread EMT(pVCpu)
15881 */
15882VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15883{
15884 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15885 Assert(!pVCpu->iem.s.cActiveMappings);
15886 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15887}
15888
15889
15890/**
15891 * Interface for HM and EM to emulate VM-exit due to exceptions (incl. NMIs).
15892 *
15893 * @returns Strict VBox status code.
15894 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15895 * @param pExitInfo Pointer to the VM-exit information.
15896 * @param pExitEventInfo Pointer to the VM-exit event information.
15897 * @thread EMT(pVCpu)
15898 */
15899VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15900{
15901 Assert(pExitInfo);
15902 Assert(pExitEventInfo);
15903 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15904 Assert(!pVCpu->iem.s.cActiveMappings);
15905 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15906}
15907
15908
15909/**
15910 * Interface for HM and EM to emulate VM-exit due to NMIs.
15911 *
15912 * @returns Strict VBox status code.
15913 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15914 * @thread EMT(pVCpu)
15915 */
15916VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPU pVCpu)
15917{
15918 VMXVEXITINFO ExitInfo;
15919 RT_ZERO(ExitInfo);
15920 VMXVEXITEVENTINFO ExitEventInfo;
15921    RT_ZERO(ExitEventInfo);
15922 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15923 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15924 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15925
15926 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15927 Assert(!pVCpu->iem.s.cActiveMappings);
15928 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15929}
15930
15931
15932/**
15933 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15934 *
15935 * @returns Strict VBox status code.
15936 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15937 * @thread EMT(pVCpu)
15938 */
15939VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPU pVCpu)
15940{
15941 VBOXSTRICTRC rcStrict = iemVmxVmexitTripleFault(pVCpu);
15942 Assert(!pVCpu->iem.s.cActiveMappings);
15943 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15944}
15945
15946
15947/**
15948 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15949 *
15950 * @returns Strict VBox status code.
15951 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15952 * @param uVector The SIPI vector.
15953 * @thread EMT(pVCpu)
15954 */
15955VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15956{
15957 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15958 Assert(!pVCpu->iem.s.cActiveMappings);
15959 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15960}
15961
15962
15963/**
15964 * Interface for HM and EM to emulate a VM-exit.
15965 *
15966 * If a specialized version of a VM-exit handler exists, that must be used instead.
15967 *
15968 * @returns Strict VBox status code.
15969 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15970 * @param uExitReason The VM-exit reason.
15971 * @param u64ExitQual The Exit qualification.
15972 * @thread EMT(pVCpu)
15973 */
15974VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15975{
15976 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15977 Assert(!pVCpu->iem.s.cActiveMappings);
15978 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15979}
15980
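/*
 * A minimal sketch of signalling a plain VM-exit to the nested-guest via the
 * interface above.  The exit reason and qualification come from the caller;
 * the VINF_VMX_VMEXIT handling mirrors what IEMInjectTrpmEvent does further up
 * in this file.
 */
#if 0
static VBOXSTRICTRC SampleVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVmexit(pVCpu, uExitReason, u64ExitQual);
    if (rcStrict == VINF_VMX_VMEXIT)    /* the switch back to the outer guest context has been done */
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}
#endif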
15981
15982/**
15983 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15984 *
15985 * This is meant to be used for those instructions for which VMX provides
15986 * additional decoding information beyond just the instruction length.
15987 *
15988 * @returns Strict VBox status code.
15989 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15990 * @param pExitInfo Pointer to the VM-exit information.
15991 * @thread EMT(pVCpu)
15992 */
15993VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15994{
15995 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15996 Assert(!pVCpu->iem.s.cActiveMappings);
15997 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15998}
15999
16000
16001/**
16002 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16003 *
16004 * This is meant to be used for those instructions for which VMX provides only
16005 * the instruction length.
16006 *
16007 * @returns Strict VBox status code.
16008 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16009 * @param uExitReason The VM-exit reason.
16010 * @param cbInstr The instruction length in bytes.
16011 * @thread EMT(pVCpu)
16012 */
16013VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16014{
16015 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16016 Assert(!pVCpu->iem.s.cActiveMappings);
16017 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16018}
16019
16020
16021/**
16022 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16023 *
16024 * @returns Strict VBox status code.
16025 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16026 * @param pExitInfo Pointer to the VM-exit information.
16027 * @param pExitEventInfo Pointer to the VM-exit event information.
16028 * @thread EMT(pVCpu)
16029 */
16030VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16031{
16032 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16033 Assert(!pVCpu->iem.s.cActiveMappings);
16034 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16035}
16036
16037
16038/**
16039 * Interface for HM and EM to emulate the VMREAD instruction.
16040 *
16041 * @returns Strict VBox status code.
16042 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16043 * @param pExitInfo Pointer to the VM-exit information.
16044 * @thread EMT(pVCpu)
16045 */
16046VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16047{
16048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16049 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16050 Assert(pExitInfo);
16051
16052 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16053
16054 VBOXSTRICTRC rcStrict;
16055 uint8_t const cbInstr = pExitInfo->cbInstr;
16056 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16057 uint64_t const u64FieldEnc = fIs64BitMode
16058 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16059 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16060 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16061 {
16062 if (fIs64BitMode)
16063 {
16064 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16065 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16066 }
16067 else
16068 {
16069 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16070 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16071 }
16072 }
16073 else
16074 {
16075 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16076 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16077 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16078 }
16079 Assert(!pVCpu->iem.s.cActiveMappings);
16080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16081}
16082
16083
16084/**
16085 * Interface for HM and EM to emulate the VMWRITE instruction.
16086 *
16087 * @returns Strict VBox status code.
16088 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16089 * @param pExitInfo Pointer to the VM-exit information.
16090 * @thread EMT(pVCpu)
16091 */
16092VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16093{
16094 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16095 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16096 Assert(pExitInfo);
16097
16098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16099
16100 uint64_t u64Val;
16101 uint8_t iEffSeg;
16102 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16103 {
16104 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16105 iEffSeg = UINT8_MAX;
16106 }
16107 else
16108 {
16109 u64Val = pExitInfo->GCPtrEffAddr;
16110 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16111 }
16112 uint8_t const cbInstr = pExitInfo->cbInstr;
16113 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16114 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16115 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16116 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16117 Assert(!pVCpu->iem.s.cActiveMappings);
16118 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16119}
16120
16121
16122/**
16123 * Interface for HM and EM to emulate the VMPTRLD instruction.
16124 *
16125 * @returns Strict VBox status code.
16126 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16127 * @param pExitInfo Pointer to the VM-exit information.
16128 * @thread EMT(pVCpu)
16129 */
16130VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16131{
16132 Assert(pExitInfo);
16133 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16134 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16135
16136 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16137
    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMPTRST instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMCLEAR instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uInstrId    The instruction ID (VMXINSTRID_VMLAUNCH or
 *                      VMXINSTRID_VMRESUME).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}

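/*
 * Usage sketch (hypothetical, for illustration only; not an actual HM/EM call site):
 * a caller on the EMT that has decoded a nested-guest VMLAUNCH could do
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, cbInstr, VMXINSTRID_VMLAUNCH);
 *
 * and then feed rcStrict into its usual strict status code handling.
 */
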
/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVVPID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

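    /* The INVVPID type comes from the register operand; the INVVPID descriptor itself is
       read from guest memory by iemVmxInvvpid using the segment and address gathered below. */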
    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

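    /* Mask off the page offset to get the guest-physical base of the APIC-access page. */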
    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}

/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
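            /* Both are EM scheduling status codes; the numerically lower one carries the
               higher priority, so that is the one to keep. */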
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

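            /* A bounce buffered write may span two guest-physical pages; commit the first
               part and, when present, the second part separately. */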
            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}

#endif /* IN_RING3 */