VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@78058

Last change on this file since 78058 was 77898, checked in by vboxsync, 6 years ago

VMM/IEM: The EXT bit of the error code must be 1 for nested exceptions when delivering a #DB using INT1 (ICEBP).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 647.9 KB
1/* $Id: IEMAll.cpp 77898 2019-03-27 06:13:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
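/* Minimal illustration of the two styles (a hedged sketch, not code from this
 * file; the fetch helper names are assumed here for illustration only):
 *
 *      // Status-code style: every helper returns a VBOXSTRICTRC that must be checked.
 *      uint8_t      bOpcode;
 *      VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &bOpcode);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Setjmp style: the value comes back in a register and any failure
 *      // longjmps to the setjmp frame established by the outer caller.
 *      uint8_t bOpcode = iemOpcodeGetNextU8Jmp(pVCpu);
 */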
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
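/* What this typically amounts to (a hedged sketch, not code from this file;
 * pbSrc is an assumed byte pointer, RT_MAKE_U32_FROM_U8 is the generic IPRT
 * byte-assembly macro):
 *
 *  #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *      uint32_t const u32 = *(uint32_t const *)pbSrc;   // x86/AMD64 tolerate unaligned loads
 *  #else
 *      uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
 *  #endif
 */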
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles SVM nested-guest instruction intercepts and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
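/* Usage sketch (hedged, not the literal decoder code): for the 0x80..0x83
 * group 1 encodings the ModR/M reg field ((bRm >> 3) & 7) selects the entry,
 * which is then dispatched on operand size and lock prefix:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *      // ... then call pImpl->pfnNormalU16 / pfnLockedU32 / etc. as appropriate.
 */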
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmi(PVMCPU pVCpu);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu);
991IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
993IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
994IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
995IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
996#endif
997
998#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
999IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
1000IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1001#endif
1002
1003
1004/**
1005 * Sets the pass up status.
1006 *
1007 * @returns VINF_SUCCESS.
1008 * @param pVCpu The cross context virtual CPU structure of the
1009 * calling thread.
1010 * @param rcPassUp The pass up status. Must be informational.
1011 * VINF_SUCCESS is not allowed.
1012 */
1013IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1014{
1015 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1016
1017 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1018 if (rcOldPassUp == VINF_SUCCESS)
1019 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1020 /* If both are EM scheduling codes, use EM priority rules. */
1021 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1022 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1023 {
1024 if (rcPassUp < rcOldPassUp)
1025 {
1026 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1028 }
1029 else
1030 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1031 }
1032 /* Override EM scheduling with specific status code. */
1033 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1034 {
1035 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1036 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1037 }
1038 /* Don't override specific status code, first come first served. */
1039 else
1040 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1041 return VINF_SUCCESS;
1042}
1043
1044
1045/**
1046 * Calculates the CPU mode.
1047 *
1048 * This is mainly for updating IEMCPU::enmCpuMode.
1049 *
1050 * @returns CPU mode.
1051 * @param pVCpu The cross context virtual CPU structure of the
1052 * calling thread.
1053 */
1054DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1055{
1056 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1057 return IEMMODE_64BIT;
1058 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1059 return IEMMODE_32BIT;
1060 return IEMMODE_16BIT;
1061}
1062
1063
1064/**
1065 * Initializes the execution state.
1066 *
1067 * @param pVCpu The cross context virtual CPU structure of the
1068 * calling thread.
1069 * @param fBypassHandlers Whether to bypass access handlers.
1070 *
1071 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1072 * side-effects in strict builds.
1073 */
1074DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1075{
1076 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1077 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1078
1079#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1088#endif
1089
1090#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1091 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1092#endif
1093 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1094 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1095#ifdef VBOX_STRICT
1096 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1097 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1098 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1099 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1100 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1101 pVCpu->iem.s.uRexReg = 127;
1102 pVCpu->iem.s.uRexB = 127;
1103 pVCpu->iem.s.offModRm = 127;
1104 pVCpu->iem.s.uRexIndex = 127;
1105 pVCpu->iem.s.iEffSeg = 127;
1106 pVCpu->iem.s.idxPrefix = 127;
1107 pVCpu->iem.s.uVex3rdReg = 127;
1108 pVCpu->iem.s.uVexLength = 127;
1109 pVCpu->iem.s.fEvexStuff = 127;
1110 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1111# ifdef IEM_WITH_CODE_TLB
1112 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1113 pVCpu->iem.s.pbInstrBuf = NULL;
1114 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1115 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1116 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1117 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1118# else
1119 pVCpu->iem.s.offOpcode = 127;
1120 pVCpu->iem.s.cbOpcode = 127;
1121# endif
1122#endif
1123
1124 pVCpu->iem.s.cActiveMappings = 0;
1125 pVCpu->iem.s.iNextMapping = 0;
1126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1127 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1129 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1130 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1131 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1132 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1133 if (!pVCpu->iem.s.fInPatchCode)
1134 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1135#endif
1136}
1137
1138#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1139/**
1140 * Performs a minimal reinitialization of the execution state.
1141 *
1142 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1143 * 'world-switch' type operations on the CPU. Currently only nested
1144 * hardware-virtualization uses it.
1145 *
1146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1147 */
1148IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1149{
1150 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1151 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1152
1153 pVCpu->iem.s.uCpl = uCpl;
1154 pVCpu->iem.s.enmCpuMode = enmMode;
1155 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1156 pVCpu->iem.s.enmEffAddrMode = enmMode;
1157 if (enmMode != IEMMODE_64BIT)
1158 {
1159 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1160 pVCpu->iem.s.enmEffOpSize = enmMode;
1161 }
1162 else
1163 {
1164 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1165 pVCpu->iem.s.enmEffOpSize = enmMode;
1166 }
1167 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1168#ifndef IEM_WITH_CODE_TLB
1169 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1170 pVCpu->iem.s.offOpcode = 0;
1171 pVCpu->iem.s.cbOpcode = 0;
1172#endif
1173 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1174}
1175#endif
1176
1177/**
1178 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1179 *
1180 * @param pVCpu The cross context virtual CPU structure of the
1181 * calling thread.
1182 */
1183DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1184{
1185 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1186#ifdef VBOX_STRICT
1187# ifdef IEM_WITH_CODE_TLB
1188 NOREF(pVCpu);
1189# else
1190 pVCpu->iem.s.cbOpcode = 0;
1191# endif
1192#else
1193 NOREF(pVCpu);
1194#endif
1195}
1196
1197
1198/**
1199 * Initializes the decoder state.
1200 *
1201 * iemReInitDecoder is mostly a copy of this function.
1202 *
1203 * @param pVCpu The cross context virtual CPU structure of the
1204 * calling thread.
1205 * @param fBypassHandlers Whether to bypass access handlers.
1206 */
1207DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1208{
1209 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1210 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1211
1212#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1221#endif
1222
1223#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1224 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1225#endif
1226 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1227 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1228 pVCpu->iem.s.enmCpuMode = enmMode;
1229 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1230 pVCpu->iem.s.enmEffAddrMode = enmMode;
1231 if (enmMode != IEMMODE_64BIT)
1232 {
1233 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1234 pVCpu->iem.s.enmEffOpSize = enmMode;
1235 }
1236 else
1237 {
1238 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1239 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1240 }
1241 pVCpu->iem.s.fPrefixes = 0;
1242 pVCpu->iem.s.uRexReg = 0;
1243 pVCpu->iem.s.uRexB = 0;
1244 pVCpu->iem.s.uRexIndex = 0;
1245 pVCpu->iem.s.idxPrefix = 0;
1246 pVCpu->iem.s.uVex3rdReg = 0;
1247 pVCpu->iem.s.uVexLength = 0;
1248 pVCpu->iem.s.fEvexStuff = 0;
1249 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1250#ifdef IEM_WITH_CODE_TLB
1251 pVCpu->iem.s.pbInstrBuf = NULL;
1252 pVCpu->iem.s.offInstrNextByte = 0;
1253 pVCpu->iem.s.offCurInstrStart = 0;
1254# ifdef VBOX_STRICT
1255 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1256 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1257 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1258# endif
1259#else
1260 pVCpu->iem.s.offOpcode = 0;
1261 pVCpu->iem.s.cbOpcode = 0;
1262#endif
1263 pVCpu->iem.s.offModRm = 0;
1264 pVCpu->iem.s.cActiveMappings = 0;
1265 pVCpu->iem.s.iNextMapping = 0;
1266 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1267 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1268#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1269 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1270 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1271 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1272 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1273 if (!pVCpu->iem.s.fInPatchCode)
1274 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1275#endif
1276
1277#ifdef DBGFTRACE_ENABLED
1278 switch (enmMode)
1279 {
1280 case IEMMODE_64BIT:
1281 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1282 break;
1283 case IEMMODE_32BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1285 break;
1286 case IEMMODE_16BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1288 break;
1289 }
1290#endif
1291}
1292
1293
1294/**
1295 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1296 *
1297 * This is mostly a copy of iemInitDecoder.
1298 *
1299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1300 */
1301DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1302{
1303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1304
1305#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1314#endif
1315
1316 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1317 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1318 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1319 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1320 pVCpu->iem.s.enmEffAddrMode = enmMode;
1321 if (enmMode != IEMMODE_64BIT)
1322 {
1323 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1324 pVCpu->iem.s.enmEffOpSize = enmMode;
1325 }
1326 else
1327 {
1328 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1329 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1330 }
1331 pVCpu->iem.s.fPrefixes = 0;
1332 pVCpu->iem.s.uRexReg = 0;
1333 pVCpu->iem.s.uRexB = 0;
1334 pVCpu->iem.s.uRexIndex = 0;
1335 pVCpu->iem.s.idxPrefix = 0;
1336 pVCpu->iem.s.uVex3rdReg = 0;
1337 pVCpu->iem.s.uVexLength = 0;
1338 pVCpu->iem.s.fEvexStuff = 0;
1339 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1340#ifdef IEM_WITH_CODE_TLB
1341 if (pVCpu->iem.s.pbInstrBuf)
1342 {
1343 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1344 - pVCpu->iem.s.uInstrBufPc;
1345 if (off < pVCpu->iem.s.cbInstrBufTotal)
1346 {
1347 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1348 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1349 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1350 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1351 else
1352 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1353 }
1354 else
1355 {
1356 pVCpu->iem.s.pbInstrBuf = NULL;
1357 pVCpu->iem.s.offInstrNextByte = 0;
1358 pVCpu->iem.s.offCurInstrStart = 0;
1359 pVCpu->iem.s.cbInstrBuf = 0;
1360 pVCpu->iem.s.cbInstrBufTotal = 0;
1361 }
1362 }
1363 else
1364 {
1365 pVCpu->iem.s.offInstrNextByte = 0;
1366 pVCpu->iem.s.offCurInstrStart = 0;
1367 pVCpu->iem.s.cbInstrBuf = 0;
1368 pVCpu->iem.s.cbInstrBufTotal = 0;
1369 }
1370#else
1371 pVCpu->iem.s.cbOpcode = 0;
1372 pVCpu->iem.s.offOpcode = 0;
1373#endif
1374 pVCpu->iem.s.offModRm = 0;
1375 Assert(pVCpu->iem.s.cActiveMappings == 0);
1376 pVCpu->iem.s.iNextMapping = 0;
1377 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1378 Assert(pVCpu->iem.s.fBypassHandlers == false);
1379#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1380 if (!pVCpu->iem.s.fInPatchCode)
1381 { /* likely */ }
1382 else
1383 {
1384 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1385 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1386 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1387 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1388 if (!pVCpu->iem.s.fInPatchCode)
1389 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1390 }
1391#endif
1392
1393#ifdef DBGFTRACE_ENABLED
1394 switch (enmMode)
1395 {
1396 case IEMMODE_64BIT:
1397 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1398 break;
1399 case IEMMODE_32BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1401 break;
1402 case IEMMODE_16BIT:
1403 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1404 break;
1405 }
1406#endif
1407}
1408
1409
1410
1411/**
1412 * Prefetches opcodes the first time, when starting to execute.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pVCpu The cross context virtual CPU structure of the
1416 * calling thread.
1417 * @param fBypassHandlers Whether to bypass access handlers.
1418 */
1419IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1420{
1421 iemInitDecoder(pVCpu, fBypassHandlers);
1422
1423#ifdef IEM_WITH_CODE_TLB
1424 /** @todo Do ITLB lookup here. */
1425
1426#else /* !IEM_WITH_CODE_TLB */
1427
1428 /*
1429 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1430 *
1431 * First translate CS:rIP to a physical address.
1432 */
1433 uint32_t cbToTryRead;
1434 RTGCPTR GCPtrPC;
1435 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1436 {
1437 cbToTryRead = PAGE_SIZE;
1438 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1439 if (IEM_IS_CANONICAL(GCPtrPC))
1440 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1441 else
1442 return iemRaiseGeneralProtectionFault0(pVCpu);
1443 }
1444 else
1445 {
1446 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1447 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1448 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1449 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1450 else
1451 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1452 if (cbToTryRead) { /* likely */ }
1453 else /* overflowed */
1454 {
1455 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1456 cbToTryRead = UINT32_MAX;
1457 }
1458 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1459 Assert(GCPtrPC <= UINT32_MAX);
1460 }
1461
1462# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1463 /* Allow interpretation of patch manager code blocks since they can for
1464 instance throw #PFs for perfectly good reasons. */
1465 if (pVCpu->iem.s.fInPatchCode)
1466 {
1467 size_t cbRead = 0;
1468 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1469 AssertRCReturn(rc, rc);
1470 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1471 return VINF_SUCCESS;
1472 }
1473# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1474
1475 RTGCPHYS GCPhys;
1476 uint64_t fFlags;
1477 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1478 if (RT_SUCCESS(rc)) { /* probable */ }
1479 else
1480 {
1481 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1482 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1483 }
1484 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1485 else
1486 {
1487 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1488 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1489 }
1490 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1491 else
1492 {
1493 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1494 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1495 }
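    /* PGMGstGetPage returns the page aligned guest physical address, so put the
       page offset of the fetch address back in to get the exact byte to read. */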
1496 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1497 /** @todo Check reserved bits and such stuff. PGM is better at doing
1498 * that, so do it when implementing the guest virtual address
1499 * TLB... */
1500
1501 /*
1502 * Read the bytes at this address.
1503 */
1504 PVM pVM = pVCpu->CTX_SUFF(pVM);
1505# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1506 size_t cbActual;
1507 if ( PATMIsEnabled(pVM)
1508 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1509 {
1510 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1511 Assert(cbActual > 0);
1512 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1513 }
1514 else
1515# endif
1516 {
1517 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1518 if (cbToTryRead > cbLeftOnPage)
1519 cbToTryRead = cbLeftOnPage;
1520 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1521 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1522
1523 if (!pVCpu->iem.s.fBypassHandlers)
1524 {
1525 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1526 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1527 { /* likely */ }
1528 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1529 {
1530 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1531                     GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1532 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1533 }
1534 else
1535 {
1536 Log((RT_SUCCESS(rcStrict)
1537 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1538 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1539                     GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1540 return rcStrict;
1541 }
1542 }
1543 else
1544 {
1545 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1546 if (RT_SUCCESS(rc))
1547 { /* likely */ }
1548 else
1549 {
1550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1551                      GCPtrPC, GCPhys, cbToTryRead, rc));
1552 return rc;
1553 }
1554 }
1555 pVCpu->iem.s.cbOpcode = cbToTryRead;
1556 }
1557#endif /* !IEM_WITH_CODE_TLB */
1558 return VINF_SUCCESS;
1559}
1560
1561
1562/**
1563 * Invalidates the IEM TLBs.
1564 *
1565 * This is called internally as well as by PGM when moving GC mappings.
1566 *
1568 * @param pVCpu The cross context virtual CPU structure of the calling
1569 * thread.
1570 * @param fVmm Set when PGM calls us with a remapping.
1571 */
1572VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1573{
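    /* Invalidation works by bumping the TLB revision rather than clearing the
       arrays: the revision lives in the tag bits above the page number, so a
       single increment retires every existing entry.  Only on the rare wrap
       around to zero do we have to walk the array and zero the tags explicitly. */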
1574#ifdef IEM_WITH_CODE_TLB
1575 pVCpu->iem.s.cbInstrBufTotal = 0;
1576 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1577 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1578 { /* very likely */ }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1582 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1583 while (i-- > 0)
1584 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1585 }
1586#endif
1587
1588#ifdef IEM_WITH_DATA_TLB
1589 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1590 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1591 { /* very likely */ }
1592 else
1593 {
1594 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1595 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1596 while (i-- > 0)
1597 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1598 }
1599#endif
1600 NOREF(pVCpu); NOREF(fVmm);
1601}
1602
1603
1604/**
1605 * Invalidates a page in the TLBs.
1606 *
1607 * @param pVCpu The cross context virtual CPU structure of the calling
1608 * thread.
1609 * @param GCPtr The address of the page to invalidate.
1610 */
1611VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1612{
1613#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1614 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1615 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1616 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1617 uintptr_t idx = (uint8_t)GCPtr;
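    /* The TLBs are direct mapped: the low 8 bits of the page number select the
       entry, and the stored tag is the page number OR'ed with the current
       revision, so entries left over from before a full invalidation never match. */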
1618
1619# ifdef IEM_WITH_CODE_TLB
1620 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1621 {
1622 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1623 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1624 pVCpu->iem.s.cbInstrBufTotal = 0;
1625 }
1626# endif
1627
1628# ifdef IEM_WITH_DATA_TLB
1629 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1630 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1631# endif
1632#else
1633 NOREF(pVCpu); NOREF(GCPtr);
1634#endif
1635}
1636
1637
1638/**
1639 * Invalidates the host physical aspects of the IEM TLBs.
1640 *
1641 * This is called internally as well as by PGM when moving GC mappings.
1642 *
1643 * @param pVCpu The cross context virtual CPU structure of the calling
1644 * thread.
1645 */
1646VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1647{
1648#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1649    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1650
1651# ifdef IEM_WITH_CODE_TLB
1652 pVCpu->iem.s.cbInstrBufTotal = 0;
1653# endif
1654 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
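    /* Same trick as for the virtual revision: bumping uTlbPhysRev invalidates the
       cached physical info in every entry at once.  On the rare wrap around to
       zero we instead visit each entry, dropping the ring-3 mapping pointer and
       the physical-revision dependent flag bits. */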
1655 if (uTlbPhysRev != 0)
1656 {
1657 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1658 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1659 }
1660 else
1661 {
1662 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1663 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1664
1665 unsigned i;
1666# ifdef IEM_WITH_CODE_TLB
1667 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1668 while (i-- > 0)
1669 {
1670 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1671 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1672 }
1673# endif
1674# ifdef IEM_WITH_DATA_TLB
1675 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1676 while (i-- > 0)
1677 {
1678 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1679 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1680 }
1681# endif
1682 }
1683#else
1684 NOREF(pVCpu);
1685#endif
1686}
1687
1688
1689/**
1690 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1691 *
1692 * This is called internally as well as by PGM when moving GC mappings.
1693 *
1694 * @param pVM The cross context VM structure.
1695 *
1696 * @remarks Caller holds the PGM lock.
1697 */
1698VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1699{
1700 RT_NOREF_PV(pVM);
1701}
1702
1703#ifdef IEM_WITH_CODE_TLB
1704
1705/**
1706 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1707 * failure and longjmping.
1708 *
1709 * We end up here for a number of reasons:
1710 * - pbInstrBuf isn't yet initialized.
1711 * - Advancing beyond the buffer boundary (e.g. cross page).
1712 * - Advancing beyond the CS segment limit.
1713 * - Fetching from non-mappable page (e.g. MMIO).
1714 *
1715 * @param pVCpu The cross context virtual CPU structure of the
1716 * calling thread.
1717 * @param pvDst Where to return the bytes.
1718 * @param cbDst Number of bytes to read.
1719 *
1720 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1721 */
1722IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1723{
1724#ifdef IN_RING3
1725 for (;;)
1726 {
1727 Assert(cbDst <= 8);
1728 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1729
1730 /*
1731 * We might have a partial buffer match, deal with that first to make the
1732 * rest simpler. This is the first part of the cross page/buffer case.
1733 */
1734 if (pVCpu->iem.s.pbInstrBuf != NULL)
1735 {
1736 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1737 {
1738 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1739 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1740 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1741
1742 cbDst -= cbCopy;
1743 pvDst = (uint8_t *)pvDst + cbCopy;
1744 offBuf += cbCopy;
1745                pVCpu->iem.s.offInstrNextByte += cbCopy;
1746 }
1747 }
1748
1749 /*
1750 * Check segment limit, figuring how much we're allowed to access at this point.
1751 *
1752 * We will fault immediately if RIP is past the segment limit / in non-canonical
1753 * territory. If we do continue, there are one or more bytes to read before we
1754 * end up in trouble and we need to do that first before faulting.
1755 */
1756 RTGCPTR GCPtrFirst;
1757 uint32_t cbMaxRead;
1758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1759 {
1760 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1761 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1762 { /* likely */ }
1763 else
1764 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1765 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1766 }
1767 else
1768 {
1769 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1770 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1771 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1772 { /* likely */ }
1773 else
1774 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1775 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1776 if (cbMaxRead != 0)
1777 { /* likely */ }
1778 else
1779 {
1780 /* Overflowed because address is 0 and limit is max. */
1781 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1782 cbMaxRead = X86_PAGE_SIZE;
1783 }
1784 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1785 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1786 if (cbMaxRead2 < cbMaxRead)
1787 cbMaxRead = cbMaxRead2;
1788 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1789 }
1790
1791 /*
1792 * Get the TLB entry for this piece of code.
1793 */
1794 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1795 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1796 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1797 if (pTlbe->uTag == uTag)
1798 {
1799 /* likely when executing lots of code, otherwise unlikely */
1800# ifdef VBOX_WITH_STATISTICS
1801 pVCpu->iem.s.CodeTlb.cTlbHits++;
1802# endif
1803 }
1804 else
1805 {
1806 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1807# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1808 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1809 {
1810 pTlbe->uTag = uTag;
1811 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1812                                          | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1813 pTlbe->GCPhys = NIL_RTGCPHYS;
1814 pTlbe->pbMappingR3 = NULL;
1815 }
1816 else
1817# endif
1818 {
1819 RTGCPHYS GCPhys;
1820 uint64_t fFlags;
1821 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1822 if (RT_FAILURE(rc))
1823 {
1824 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1825 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1826 }
1827
1828 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
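            /* The page-table bits are stored inverted (IEMTLBE_F_PT_NO_USER,
               _NO_WRITE, _NO_DIRTY line up with the corresponding X86_PTE_* bits)
               so the permission checks below can simply test for set bits, and the
               NX bit is shifted down to bit 0 (IEMTLBE_F_PT_NO_EXEC == 1). */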
1829 pTlbe->uTag = uTag;
1830 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1831 pTlbe->GCPhys = GCPhys;
1832 pTlbe->pbMappingR3 = NULL;
1833 }
1834 }
1835
1836 /*
1837 * Check TLB page table level access flags.
1838 */
1839 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1840 {
1841 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1842 {
1843 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1844 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1845 }
1846 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1847 {
1848 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1849 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1850 }
1851 }
1852
1853# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1854 /*
1855 * Allow interpretation of patch manager code blocks since they can for
1856 * instance throw #PFs for perfectly good reasons.
1857 */
1858 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1859        { /* likely */ }
1860 else
1861 {
1862        /** @todo This could be optimized a little in ring-3 if we liked. */
1863 size_t cbRead = 0;
1864 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1865 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1866 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1867 return;
1868 }
1869# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1870
1871 /*
1872 * Look up the physical page info if necessary.
1873 */
1874 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 { /* not necessary */ }
1876 else
1877 {
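            /* The cached physical info (ring-3 mapping pointer and PG_* flags) is
               stale whenever the physical revision in the entry no longer matches
               the TLB's current one, so refresh it from PGM before using it. */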
1878 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1879 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1880 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1881 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1882 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1883 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1884 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1885 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1886 }
1887
1888# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1889 /*
1890 * Try do a direct read using the pbMappingR3 pointer.
1891 */
1892 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1893 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1894 {
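            /* Direct read: expose the ring-3 page mapping as the instruction buffer.
               The buffer offsets below are relative to the start of the guest page,
               and cbInstrBuf is capped so the inline opcode fetchers never read more
               than 15 bytes for the current instruction without re-entering here. */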
1895 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1896 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1897 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1898 {
1899 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1900 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1901 }
1902 else
1903 {
1904 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1905 Assert(cbInstr < cbMaxRead);
1906 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1907 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1908 }
1909 if (cbDst <= cbMaxRead)
1910 {
1911 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1912 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1913 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1914 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1915 return;
1916 }
1917 pVCpu->iem.s.pbInstrBuf = NULL;
1918
1919 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1920 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1921 }
1922 else
1923# endif
1924#if 0
1925 /*
1926     * If there is no special read handling, we can read a bit more and put it
1927     * in the prefetch buffer.
1928 */
1929 if ( cbDst < cbMaxRead
1930 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1931 {
1932 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1933 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1935 { /* likely */ }
1936 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1937 {
1938 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1939                     GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1940 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1941            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1942 }
1943 else
1944 {
1945 Log((RT_SUCCESS(rcStrict)
1946 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1947 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1948                     GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1949 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1950 }
1951 }
1952 /*
1953 * Special read handling, so only read exactly what's needed.
1954 * This is a highly unlikely scenario.
1955 */
1956 else
1957#endif
1958 {
1959 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1960 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1961 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1962 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1964 { /* likely */ }
1965 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1966 {
1967 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1968                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1969 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1970 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1971 }
1972 else
1973 {
1974 Log((RT_SUCCESS(rcStrict)
1975 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1976 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1977                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1978 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1979 }
1980 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1981 if (cbToRead == cbDst)
1982 return;
1983 }
1984
1985 /*
1986 * More to read, loop.
1987 */
1988 cbDst -= cbMaxRead;
1989 pvDst = (uint8_t *)pvDst + cbMaxRead;
1990 }
1991#else
1992 RT_NOREF(pvDst, cbDst);
1993 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1994#endif
1995}
1996
1997#else
1998
1999/**
2000 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2001 * exception if it fails.
2002 *
2003 * @returns Strict VBox status code.
2004 * @param pVCpu The cross context virtual CPU structure of the
2005 * calling thread.
2006 * @param cbMin The minimum number of bytes, relative to offOpcode,
2007 * that must be read.
2008 */
2009IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2010{
2011 /*
2012 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2013 *
2014 * First translate CS:rIP to a physical address.
2015 */
2016 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2017 uint32_t cbToTryRead;
2018 RTGCPTR GCPtrNext;
2019 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2020 {
2021 cbToTryRead = PAGE_SIZE;
2022 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2023 if (!IEM_IS_CANONICAL(GCPtrNext))
2024 return iemRaiseGeneralProtectionFault0(pVCpu);
2025 }
2026 else
2027 {
2028 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2029 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2030 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2031 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2032 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2033 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2034 if (!cbToTryRead) /* overflowed */
2035 {
2036 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2037 cbToTryRead = UINT32_MAX;
2038 /** @todo check out wrapping around the code segment. */
2039 }
2040 if (cbToTryRead < cbMin - cbLeft)
2041 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2042 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2043 }
2044
2045 /* Only read up to the end of the page, and make sure we don't read more
2046 than the opcode buffer can hold. */
2047 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2048 if (cbToTryRead > cbLeftOnPage)
2049 cbToTryRead = cbLeftOnPage;
2050 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2051 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2052/** @todo r=bird: Convert assertion into undefined opcode exception? */
2053 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2054
2055# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2056 /* Allow interpretation of patch manager code blocks since they can for
2057 instance throw #PFs for perfectly good reasons. */
2058 if (pVCpu->iem.s.fInPatchCode)
2059 {
2060 size_t cbRead = 0;
2061 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2062 AssertRCReturn(rc, rc);
2063 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2064 return VINF_SUCCESS;
2065 }
2066# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2067
2068 RTGCPHYS GCPhys;
2069 uint64_t fFlags;
2070 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2071 if (RT_FAILURE(rc))
2072 {
2073 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2074 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2075 }
2076 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2077 {
2078 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2079 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2080 }
2081 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2082 {
2083 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2084 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2085 }
2086 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2087 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2088 /** @todo Check reserved bits and such stuff. PGM is better at doing
2089 * that, so do it when implementing the guest virtual address
2090 * TLB... */
2091
2092 /*
2093 * Read the bytes at this address.
2094 *
2095 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2096 * and since PATM should only patch the start of an instruction there
2097 * should be no need to check again here.
2098 */
2099 if (!pVCpu->iem.s.fBypassHandlers)
2100 {
2101 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2102 cbToTryRead, PGMACCESSORIGIN_IEM);
2103 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2104 { /* likely */ }
2105 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2106 {
2107 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2108                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2109 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2110 }
2111 else
2112 {
2113 Log((RT_SUCCESS(rcStrict)
2114 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2115 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2116                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2117 return rcStrict;
2118 }
2119 }
2120 else
2121 {
2122 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2123 if (RT_SUCCESS(rc))
2124 { /* likely */ }
2125 else
2126 {
2127 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2128 return rc;
2129 }
2130 }
2131 pVCpu->iem.s.cbOpcode += cbToTryRead;
2132 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2133
2134 return VINF_SUCCESS;
2135}
2136
2137#endif /* !IEM_WITH_CODE_TLB */
2138#ifndef IEM_WITH_SETJMP
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2142 *
2143 * @returns Strict VBox status code.
2144 * @param pVCpu The cross context virtual CPU structure of the
2145 * calling thread.
2146 * @param pb Where to return the opcode byte.
2147 */
2148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2149{
2150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2151 if (rcStrict == VINF_SUCCESS)
2152 {
2153 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2154 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2155 pVCpu->iem.s.offOpcode = offOpcode + 1;
2156 }
2157 else
2158 *pb = 0;
2159 return rcStrict;
2160}
2161
2162
2163/**
2164 * Fetches the next opcode byte.
2165 *
2166 * @returns Strict VBox status code.
2167 * @param pVCpu The cross context virtual CPU structure of the
2168 * calling thread.
2169 * @param pu8 Where to return the opcode byte.
2170 */
2171DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2172{
2173 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2174 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2175 {
2176 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2177 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2178 return VINF_SUCCESS;
2179 }
2180 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2181}
2182
2183#else /* IEM_WITH_SETJMP */
2184
2185/**
2186 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2187 *
2188 * @returns The opcode byte.
2189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2190 */
2191DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2192{
2193# ifdef IEM_WITH_CODE_TLB
2194 uint8_t u8;
2195 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2196 return u8;
2197# else
2198 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2199 if (rcStrict == VINF_SUCCESS)
2200 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2201 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2202# endif
2203}
2204
2205
2206/**
2207 * Fetches the next opcode byte, longjmp on error.
2208 *
2209 * @returns The opcode byte.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 */
2212DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2213{
2214# ifdef IEM_WITH_CODE_TLB
2215 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2216 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2217 if (RT_LIKELY( pbBuf != NULL
2218 && offBuf < pVCpu->iem.s.cbInstrBuf))
2219 {
2220 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2221 return pbBuf[offBuf];
2222 }
2223# else
2224 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2225 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2226 {
2227 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2228 return pVCpu->iem.s.abOpcode[offOpcode];
2229 }
2230# endif
2231 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2232}
2233
2234#endif /* IEM_WITH_SETJMP */
2235
2236/**
2237 * Fetches the next opcode byte, returns automatically on failure.
2238 *
2239 * @param a_pu8 Where to return the opcode byte.
2240 * @remark Implicitly references pVCpu.
2241 */
2242#ifndef IEM_WITH_SETJMP
2243# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2244 do \
2245 { \
2246 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2247 if (rcStrict2 == VINF_SUCCESS) \
2248 { /* likely */ } \
2249 else \
2250 return rcStrict2; \
2251 } while (0)
2252#else
2253# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2254#endif /* IEM_WITH_SETJMP */
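
/*
 * Illustrative sketch (not part of the original sources): how a decoder helper
 * typically consumes opcode bytes via IEM_OPCODE_GET_NEXT_U8.  The function name
 * iemOpExampleFetchImm8 and the use made of the byte are invented for the example;
 * only the macro usage pattern is taken from the code above.  Kept under #if 0.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImm8(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm); /* returns (or longjmps in setjmp mode) on fetch failure */
    Log4(("example: imm8=%#x\n", bImm));
    return VINF_SUCCESS;
}
#endif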
2255
2256
2257#ifndef IEM_WITH_SETJMP
2258/**
2259 * Fetches the next signed byte from the opcode stream.
2260 *
2261 * @returns Strict VBox status code.
2262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2263 * @param pi8 Where to return the signed byte.
2264 */
2265DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2266{
2267 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2268}
2269#endif /* !IEM_WITH_SETJMP */
2270
2271
2272/**
2273 * Fetches the next signed byte from the opcode stream, returning automatically
2274 * on failure.
2275 *
2276 * @param a_pi8 Where to return the signed byte.
2277 * @remark Implicitly references pVCpu.
2278 */
2279#ifndef IEM_WITH_SETJMP
2280# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2281 do \
2282 { \
2283 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2284 if (rcStrict2 != VINF_SUCCESS) \
2285 return rcStrict2; \
2286 } while (0)
2287#else /* IEM_WITH_SETJMP */
2288# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2289
2290#endif /* IEM_WITH_SETJMP */
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param pu16 Where to return the opcode word.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu16 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 16-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu16 Where to return the unsigned word.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2324
2325 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream, sign-extending it to
2334 * a word and returning automatically on failure.
2335 *
2336 * @param a_pu16 Where to return the word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu32 Where to return the opcode dword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu32 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 32-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu32 Where to return the unsigned dword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2383
2384 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391/**
2392 * Fetches the next signed byte from the opcode stream, sign-extending it to
2393 * a double word and returning automatically on failure.
2394 *
2395 * @param a_pu32 Where to return the double word.
2396 * @remark Implicitly references pVCpu.
2397 */
2398#ifndef IEM_WITH_SETJMP
2399# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2400 do \
2401 { \
2402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2403 if (rcStrict2 != VINF_SUCCESS) \
2404 return rcStrict2; \
2405 } while (0)
2406#else
2407# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2408#endif
2409
2410#ifndef IEM_WITH_SETJMP
2411
2412/**
2413 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2414 *
2415 * @returns Strict VBox status code.
2416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2417 * @param pu64 Where to return the opcode qword.
2418 */
2419DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2420{
2421 uint8_t u8;
2422 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2423 if (rcStrict == VINF_SUCCESS)
2424 *pu64 = (int8_t)u8;
2425 return rcStrict;
2426}
2427
2428
2429/**
2430 * Fetches the next signed byte from the opcode stream, extending it to
2431 * unsigned 64-bit.
2432 *
2433 * @returns Strict VBox status code.
2434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2435 * @param pu64 Where to return the unsigned qword.
2436 */
2437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2438{
2439 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2440 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2441 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2442
2443 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2444 pVCpu->iem.s.offOpcode = offOpcode + 1;
2445 return VINF_SUCCESS;
2446}
2447
2448#endif /* !IEM_WITH_SETJMP */
2449
2450
2451/**
2452 * Fetches the next signed byte from the opcode stream, sign-extending it to
2453 * a quad word and returning automatically on failure.
2454 *
2455 * @param a_pu64 Where to return the quad word.
2456 * @remark Implicitly references pVCpu.
2457 */
2458#ifndef IEM_WITH_SETJMP
2459# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2460 do \
2461 { \
2462 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2463 if (rcStrict2 != VINF_SUCCESS) \
2464 return rcStrict2; \
2465 } while (0)
2466#else
2467# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2468#endif
2469
2470
2471#ifndef IEM_WITH_SETJMP
2472/**
2473 * Fetches the next opcode byte, noting down its position as the ModR/M byte.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the
2477 * calling thread.
2478 * @param pu8 Where to return the opcode byte.
2479 */
2480DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2481{
2482 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offModRm = offOpcode;
2484 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2485 {
2486 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2487 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2488 return VINF_SUCCESS;
2489 }
2490 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2491}
2492#else /* IEM_WITH_SETJMP */
2493/**
2494 * Fetches the next opcode byte, noting down its position as the ModR/M byte, longjmp on error.
2495 *
2496 * @returns The opcode byte.
2497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2498 */
2499DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2500{
2501# ifdef IEM_WITH_CODE_TLB
2502 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2503 pVCpu->iem.s.offModRm = offBuf;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf < pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2509 return pbBuf[offBuf];
2510 }
2511# else
2512 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offModRm = offOpcode;
2514 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2515 {
2516 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2517 return pVCpu->iem.s.abOpcode[offOpcode];
2518 }
2519# endif
2520 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2521}
2522#endif /* IEM_WITH_SETJMP */
2523
2524/**
2525 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2526 * on failure.
2527 *
2528 * Will note down the position of the ModR/M byte for VT-x exits.
2529 *
2530 * @param a_pbRm Where to return the RM opcode byte.
2531 * @remark Implicitly references pVCpu.
2532 */
2533#ifndef IEM_WITH_SETJMP
2534# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2535 do \
2536 { \
2537 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2538 if (rcStrict2 == VINF_SUCCESS) \
2539 { /* likely */ } \
2540 else \
2541 return rcStrict2; \
2542 } while (0)
2543#else
2544# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2545#endif /* IEM_WITH_SETJMP */
2546
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu16 Where to return the opcode word.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567# endif
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu16 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu16 Where to return the opcode word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2584{
2585 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2587 {
2588 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2589# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2590 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2591# else
2592 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593# endif
2594 return VINF_SUCCESS;
2595 }
2596 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2597}
2598
2599#else /* IEM_WITH_SETJMP */
2600
2601/**
2602 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2603 *
2604 * @returns The opcode word.
2605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2606 */
2607DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2608{
2609# ifdef IEM_WITH_CODE_TLB
2610 uint16_t u16;
2611 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2612 return u16;
2613# else
2614 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2615 if (rcStrict == VINF_SUCCESS)
2616 {
2617 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2618 pVCpu->iem.s.offOpcode += 2;
2619# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2620 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2621# else
2622 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2623# endif
2624 }
2625 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2626# endif
2627}
2628
2629
2630/**
2631 * Fetches the next opcode word, longjmp on error.
2632 *
2633 * @returns The opcode word.
2634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2635 */
2636DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2637{
2638# ifdef IEM_WITH_CODE_TLB
2639 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2640 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2641 if (RT_LIKELY( pbBuf != NULL
2642 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2643 {
2644 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2645# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2646 return *(uint16_t const *)&pbBuf[offBuf];
2647# else
2648 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2649# endif
2650 }
2651# else
2652 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2654 {
2655 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2657 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2658# else
2659 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2660# endif
2661 }
2662# endif
2663 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2664}
2665
2666#endif /* IEM_WITH_SETJMP */
2667
2668
2669/**
2670 * Fetches the next opcode word, returns automatically on failure.
2671 *
2672 * @param a_pu16 Where to return the opcode word.
2673 * @remark Implicitly references pVCpu.
2674 */
2675#ifndef IEM_WITH_SETJMP
2676# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2677 do \
2678 { \
2679 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2680 if (rcStrict2 != VINF_SUCCESS) \
2681 return rcStrict2; \
2682 } while (0)
2683#else
2684# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2685#endif
2686
2687#ifndef IEM_WITH_SETJMP
2688
2689/**
2690 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2691 *
2692 * @returns Strict VBox status code.
2693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2694 * @param pu32 Where to return the opcode double word.
2695 */
2696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2697{
2698 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2699 if (rcStrict == VINF_SUCCESS)
2700 {
2701 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2702 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2703 pVCpu->iem.s.offOpcode = offOpcode + 2;
2704 }
2705 else
2706 *pu32 = 0;
2707 return rcStrict;
2708}
2709
2710
2711/**
2712 * Fetches the next opcode word, zero extending it to a double word.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pu32 Where to return the opcode double word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2719{
2720 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2721 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2722 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2723
2724 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2725 pVCpu->iem.s.offOpcode = offOpcode + 2;
2726 return VINF_SUCCESS;
2727}
2728
2729#endif /* !IEM_WITH_SETJMP */
2730
2731
2732/**
2733 * Fetches the next opcode word and zero extends it to a double word, returns
2734 * automatically on failure.
2735 *
2736 * @param a_pu32 Where to return the opcode double word.
2737 * @remark Implicitly references pVCpu.
2738 */
2739#ifndef IEM_WITH_SETJMP
2740# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2741 do \
2742 { \
2743 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2744 if (rcStrict2 != VINF_SUCCESS) \
2745 return rcStrict2; \
2746 } while (0)
2747#else
2748# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2749#endif
2750
2751#ifndef IEM_WITH_SETJMP
2752
2753/**
2754 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2755 *
2756 * @returns Strict VBox status code.
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 * @param pu64 Where to return the opcode quad word.
2759 */
2760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2761{
2762 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2763 if (rcStrict == VINF_SUCCESS)
2764 {
2765 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2766 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2767 pVCpu->iem.s.offOpcode = offOpcode + 2;
2768 }
2769 else
2770 *pu64 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode word, zero extending it to a quad word.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu64 Where to return the opcode quad word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2783{
2784 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2786 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2787
2788 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2789 pVCpu->iem.s.offOpcode = offOpcode + 2;
2790 return VINF_SUCCESS;
2791}
2792
2793#endif /* !IEM_WITH_SETJMP */
2794
2795/**
2796 * Fetches the next opcode word and zero extends it to a quad word, returns
2797 * automatically on failure.
2798 *
2799 * @param a_pu64 Where to return the opcode quad word.
2800 * @remark Implicitly references pVCpu.
2801 */
2802#ifndef IEM_WITH_SETJMP
2803# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2804 do \
2805 { \
2806 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2807 if (rcStrict2 != VINF_SUCCESS) \
2808 return rcStrict2; \
2809 } while (0)
2810#else
2811# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2812#endif
2813
2814
2815#ifndef IEM_WITH_SETJMP
2816/**
2817 * Fetches the next signed word from the opcode stream.
2818 *
2819 * @returns Strict VBox status code.
2820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2821 * @param pi16 Where to return the signed word.
2822 */
2823DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2824{
2825 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2826}
2827#endif /* !IEM_WITH_SETJMP */
2828
2829
2830/**
2831 * Fetches the next signed word from the opcode stream, returning automatically
2832 * on failure.
2833 *
2834 * @param a_pi16 Where to return the signed word.
2835 * @remark Implicitly references pVCpu.
2836 */
2837#ifndef IEM_WITH_SETJMP
2838# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2839 do \
2840 { \
2841 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2842 if (rcStrict2 != VINF_SUCCESS) \
2843 return rcStrict2; \
2844 } while (0)
2845#else
2846# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2847#endif
2848
2849#ifndef IEM_WITH_SETJMP
2850
2851/**
2852 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2853 *
2854 * @returns Strict VBox status code.
2855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2856 * @param pu32 Where to return the opcode dword.
2857 */
2858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2859{
2860 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2861 if (rcStrict == VINF_SUCCESS)
2862 {
2863 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 pVCpu->iem.s.offOpcode = offOpcode + 4;
2873 }
2874 else
2875 *pu32 = 0;
2876 return rcStrict;
2877}
2878
2879
2880/**
2881 * Fetches the next opcode dword.
2882 *
2883 * @returns Strict VBox status code.
2884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2885 * @param pu32 Where to return the opcode double word.
2886 */
2887DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2888{
2889 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2890 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2891 {
2892 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2893# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2894 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2895# else
2896 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900# endif
2901 return VINF_SUCCESS;
2902 }
2903 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2904}
2905
2906#else /* IEM_WITH_SETJMP */
2907
2908/**
2909 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2910 *
2911 * @returns The opcode dword.
2912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2913 */
2914DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2915{
2916# ifdef IEM_WITH_CODE_TLB
2917 uint32_t u32;
2918 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2919 return u32;
2920# else
2921 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2922 if (rcStrict == VINF_SUCCESS)
2923 {
2924 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2925 pVCpu->iem.s.offOpcode = offOpcode + 4;
2926# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2927 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2928# else
2929 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2930 pVCpu->iem.s.abOpcode[offOpcode + 1],
2931 pVCpu->iem.s.abOpcode[offOpcode + 2],
2932 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2933# endif
2934 }
2935 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2936# endif
2937}
2938
2939
2940/**
2941 * Fetches the next opcode dword, longjmp on error.
2942 *
2943 * @returns The opcode dword.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 */
2946DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2947{
2948# ifdef IEM_WITH_CODE_TLB
2949 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2950 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2951 if (RT_LIKELY( pbBuf != NULL
2952 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2953 {
2954 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2955# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2956 return *(uint32_t const *)&pbBuf[offBuf];
2957# else
2958 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2959 pbBuf[offBuf + 1],
2960 pbBuf[offBuf + 2],
2961 pbBuf[offBuf + 3]);
2962# endif
2963 }
2964# else
2965 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2966 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2967 {
2968 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2969# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2970 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2971# else
2972 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2973 pVCpu->iem.s.abOpcode[offOpcode + 1],
2974 pVCpu->iem.s.abOpcode[offOpcode + 2],
2975 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2976# endif
2977 }
2978# endif
2979 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2980}
2981
2982#endif /* IEM_WITH_SETJMP */
2983
2984
2985/**
2986 * Fetches the next opcode dword, returns automatically on failure.
2987 *
2988 * @param a_pu32 Where to return the opcode dword.
2989 * @remark Implicitly references pVCpu.
2990 */
2991#ifndef IEM_WITH_SETJMP
2992# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2993 do \
2994 { \
2995 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2996 if (rcStrict2 != VINF_SUCCESS) \
2997 return rcStrict2; \
2998 } while (0)
2999#else
3000# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3001#endif
3002
3003#ifndef IEM_WITH_SETJMP
3004
3005/**
3006 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3007 *
3008 * @returns Strict VBox status code.
3009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3010 * @param pu64 Where to return the opcode quad word.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3013{
3014 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3015 if (rcStrict == VINF_SUCCESS)
3016 {
3017 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3018 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3019 pVCpu->iem.s.abOpcode[offOpcode + 1],
3020 pVCpu->iem.s.abOpcode[offOpcode + 2],
3021 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3022 pVCpu->iem.s.offOpcode = offOpcode + 4;
3023 }
3024 else
3025 *pu64 = 0;
3026 return rcStrict;
3027}
3028
3029
3030/**
3031 * Fetches the next opcode dword, zero extending it to a quad word.
3032 *
3033 * @returns Strict VBox status code.
3034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3035 * @param pu64 Where to return the opcode quad word.
3036 */
3037DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3038{
3039 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3040 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3041 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3042
3043 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3044 pVCpu->iem.s.abOpcode[offOpcode + 1],
3045 pVCpu->iem.s.abOpcode[offOpcode + 2],
3046 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode dword and zero extends it to a quad word, returns
3056 * automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
3072
3073
3074#ifndef IEM_WITH_SETJMP
3075/**
3076 * Fetches the next signed double word from the opcode stream.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pi32 Where to return the signed double word.
3081 */
3082DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3083{
3084 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3085}
3086#endif
3087
3088/**
3089 * Fetches the next signed double word from the opcode stream, returning
3090 * automatically on failure.
3091 *
3092 * @param a_pi32 Where to return the signed double word.
3093 * @remark Implicitly references pVCpu.
3094 */
3095#ifndef IEM_WITH_SETJMP
3096# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3097 do \
3098 { \
3099 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3100 if (rcStrict2 != VINF_SUCCESS) \
3101 return rcStrict2; \
3102 } while (0)
3103#else
3104# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3105#endif
3106
3107#ifndef IEM_WITH_SETJMP
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3111 *
3112 * @returns Strict VBox status code.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 * @param pu64 Where to return the opcode qword.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3117{
3118 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3119 if (rcStrict == VINF_SUCCESS)
3120 {
3121 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3122 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3123 pVCpu->iem.s.abOpcode[offOpcode + 1],
3124 pVCpu->iem.s.abOpcode[offOpcode + 2],
3125 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3126 pVCpu->iem.s.offOpcode = offOpcode + 4;
3127 }
3128 else
3129 *pu64 = 0;
3130 return rcStrict;
3131}
3132
3133
3134/**
3135 * Fetches the next opcode dword, sign extending it into a quad word.
3136 *
3137 * @returns Strict VBox status code.
3138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3139 * @param pu64 Where to return the opcode quad word.
3140 */
3141DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3142{
3143 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3144 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3145 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3146
3147 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3151 *pu64 = i32;
3152 pVCpu->iem.s.offOpcode = offOpcode + 4;
3153 return VINF_SUCCESS;
3154}
3155
3156#endif /* !IEM_WITH_SETJMP */
3157
3158
3159/**
3160 * Fetches the next opcode double word and sign extends it to a quad word,
3161 * returns automatically on failure.
3162 *
3163 * @param a_pu64 Where to return the opcode quad word.
3164 * @remark Implicitly references pVCpu.
3165 */
3166#ifndef IEM_WITH_SETJMP
3167# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3168 do \
3169 { \
3170 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3171 if (rcStrict2 != VINF_SUCCESS) \
3172 return rcStrict2; \
3173 } while (0)
3174#else
3175# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3176#endif
3177
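/*
 * Illustrative sketch, not part of the original source: the only difference between the
 * ..U32ZxU64 and ..S32SxU64 fetchers above is zero vs. sign extension of the same four
 * opcode bytes, e.g. the byte sequence FE FF FF FF:
 */
#if 0 /* example only, not built */
static void iemExampleDwordExtension(void)
{
    uint32_t const u32 = UINT32_C(0xfffffffe);              /* raw dword from the opcode stream */
    uint64_t const uZx = u32;                               /* zero extended (U32ZxU64) */
    uint64_t const uSx = (uint64_t)(int64_t)(int32_t)u32;   /* sign extended (S32SxU64) */
    Assert(uZx == UINT64_C(0x00000000fffffffe));
    Assert(uSx == UINT64_C(0xfffffffffffffffe));
}
#endif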
3178#ifndef IEM_WITH_SETJMP
3179
3180/**
3181 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3182 *
3183 * @returns Strict VBox status code.
3184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3185 * @param pu64 Where to return the opcode qword.
3186 */
3187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3188{
3189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3190 if (rcStrict == VINF_SUCCESS)
3191 {
3192 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3193# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3194 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3195# else
3196 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3197 pVCpu->iem.s.abOpcode[offOpcode + 1],
3198 pVCpu->iem.s.abOpcode[offOpcode + 2],
3199 pVCpu->iem.s.abOpcode[offOpcode + 3],
3200 pVCpu->iem.s.abOpcode[offOpcode + 4],
3201 pVCpu->iem.s.abOpcode[offOpcode + 5],
3202 pVCpu->iem.s.abOpcode[offOpcode + 6],
3203 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3204# endif
3205 pVCpu->iem.s.offOpcode = offOpcode + 8;
3206 }
3207 else
3208 *pu64 = 0;
3209 return rcStrict;
3210}
3211
3212
3213/**
3214 * Fetches the next opcode qword.
3215 *
3216 * @returns Strict VBox status code.
3217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3218 * @param pu64 Where to return the opcode qword.
3219 */
3220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3221{
3222 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3223 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3224 {
3225# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3226 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3227# else
3228 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3229 pVCpu->iem.s.abOpcode[offOpcode + 1],
3230 pVCpu->iem.s.abOpcode[offOpcode + 2],
3231 pVCpu->iem.s.abOpcode[offOpcode + 3],
3232 pVCpu->iem.s.abOpcode[offOpcode + 4],
3233 pVCpu->iem.s.abOpcode[offOpcode + 5],
3234 pVCpu->iem.s.abOpcode[offOpcode + 6],
3235 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3236# endif
3237 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3238 return VINF_SUCCESS;
3239 }
3240 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3241}
3242
3243#else /* IEM_WITH_SETJMP */
3244
3245/**
3246 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3247 *
3248 * @returns The opcode qword.
3249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3250 */
3251DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3252{
3253# ifdef IEM_WITH_CODE_TLB
3254 uint64_t u64;
3255 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3256 return u64;
3257# else
3258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3259 if (rcStrict == VINF_SUCCESS)
3260 {
3261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3262 pVCpu->iem.s.offOpcode = offOpcode + 8;
3263# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3264 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3265# else
3266 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3267 pVCpu->iem.s.abOpcode[offOpcode + 1],
3268 pVCpu->iem.s.abOpcode[offOpcode + 2],
3269 pVCpu->iem.s.abOpcode[offOpcode + 3],
3270 pVCpu->iem.s.abOpcode[offOpcode + 4],
3271 pVCpu->iem.s.abOpcode[offOpcode + 5],
3272 pVCpu->iem.s.abOpcode[offOpcode + 6],
3273 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3274# endif
3275 }
3276 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3277# endif
3278}
3279
3280
3281/**
3282 * Fetches the next opcode qword, longjmp on error.
3283 *
3284 * @returns The opcode qword.
3285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3286 */
3287DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3288{
3289# ifdef IEM_WITH_CODE_TLB
3290 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3291 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3292 if (RT_LIKELY( pbBuf != NULL
3293 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3294 {
3295 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3296# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3297 return *(uint64_t const *)&pbBuf[offBuf];
3298# else
3299 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3300 pbBuf[offBuf + 1],
3301 pbBuf[offBuf + 2],
3302 pbBuf[offBuf + 3],
3303 pbBuf[offBuf + 4],
3304 pbBuf[offBuf + 5],
3305 pbBuf[offBuf + 6],
3306 pbBuf[offBuf + 7]);
3307# endif
3308 }
3309# else
3310 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3311 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3312 {
3313 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3314# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3315 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3316# else
3317 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3318 pVCpu->iem.s.abOpcode[offOpcode + 1],
3319 pVCpu->iem.s.abOpcode[offOpcode + 2],
3320 pVCpu->iem.s.abOpcode[offOpcode + 3],
3321 pVCpu->iem.s.abOpcode[offOpcode + 4],
3322 pVCpu->iem.s.abOpcode[offOpcode + 5],
3323 pVCpu->iem.s.abOpcode[offOpcode + 6],
3324 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3325# endif
3326 }
3327# endif
3328 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3329}
3330
3331#endif /* IEM_WITH_SETJMP */
3332
3333/**
3334 * Fetches the next opcode quad word, returns automatically on failure.
3335 *
3336 * @param a_pu64 Where to return the opcode quad word.
3337 * @remark Implicitly references pVCpu.
3338 */
3339#ifndef IEM_WITH_SETJMP
3340# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3341 do \
3342 { \
3343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3344 if (rcStrict2 != VINF_SUCCESS) \
3345 return rcStrict2; \
3346 } while (0)
3347#else
3348# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3349#endif
3350
3351
3352/** @name Misc Worker Functions.
3353 * @{
3354 */
3355
3356/**
3357 * Gets the exception class for the specified exception vector.
3358 *
3359 * @returns The class of the specified exception.
3360 * @param uVector The exception vector.
3361 */
3362IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3363{
3364 Assert(uVector <= X86_XCPT_LAST);
3365 switch (uVector)
3366 {
3367 case X86_XCPT_DE:
3368 case X86_XCPT_TS:
3369 case X86_XCPT_NP:
3370 case X86_XCPT_SS:
3371 case X86_XCPT_GP:
3372 case X86_XCPT_SX: /* AMD only */
3373 return IEMXCPTCLASS_CONTRIBUTORY;
3374
3375 case X86_XCPT_PF:
3376 case X86_XCPT_VE: /* Intel only */
3377 return IEMXCPTCLASS_PAGE_FAULT;
3378
3379 case X86_XCPT_DF:
3380 return IEMXCPTCLASS_DOUBLE_FAULT;
3381 }
3382 return IEMXCPTCLASS_BENIGN;
3383}
3384
3385
3386/**
3387 * Evaluates how to handle an exception caused during delivery of another event
3388 * (exception / interrupt).
3389 *
3390 * @returns How to handle the recursive exception.
3391 * @param pVCpu The cross context virtual CPU structure of the
3392 * calling thread.
3393 * @param fPrevFlags The flags of the previous event.
3394 * @param uPrevVector The vector of the previous event.
3395 * @param fCurFlags The flags of the current exception.
3396 * @param uCurVector The vector of the current exception.
3397 * @param pfXcptRaiseInfo Where to store additional information about the
3398 * exception condition. Optional.
3399 */
3400VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3401 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3402{
3403 /*
3404 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3405 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3406 */
3407 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3408 Assert(pVCpu); RT_NOREF(pVCpu);
3409 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3410
3411 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3412 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3413 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3414 {
3415 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3416 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3417 {
3418 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3419 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3420 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3421 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3422 {
3423 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3424 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3425 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3426 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3427 uCurVector, pVCpu->cpum.GstCtx.cr2));
3428 }
3429 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3430 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3431 {
3432 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3433 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3434 }
3435 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3436 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3437 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3438 {
3439 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3440 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3441 }
3442 }
3443 else
3444 {
3445 if (uPrevVector == X86_XCPT_NMI)
3446 {
3447 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3448 if (uCurVector == X86_XCPT_PF)
3449 {
3450 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3451 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3452 }
3453 }
3454 else if ( uPrevVector == X86_XCPT_AC
3455 && uCurVector == X86_XCPT_AC)
3456 {
3457 enmRaise = IEMXCPTRAISE_CPU_HANG;
3458 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3459 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3460 }
3461 }
3462 }
3463 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3464 {
3465 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3466 if (uCurVector == X86_XCPT_PF)
3467 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3468 }
3469 else
3470 {
3471 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3472 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3473 }
3474
3475 if (pfXcptRaiseInfo)
3476 *pfXcptRaiseInfo = fRaiseInfo;
3477 return enmRaise;
3478}
3479
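/*
 * Illustrative sketch, not part of the original source: two classic outcomes of the class
 * matrix above.  A contributory exception (#GP) raised while delivering another contributory
 * exception (#NP) escalates to a double fault, whereas a #PF raised while delivering a #GP
 * is simply delivered as the current exception.
 */
#if 0 /* example only, not built */
static void iemExampleRecursiveXcpt(PVMCPU pVCpu)
{
    IEMXCPTRAISEINFO fInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);

    enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                        IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, &fInfo);
    Assert(enmRaise == IEMXCPTRAISE_CURRENT_XCPT);
    RT_NOREF2(enmRaise, fInfo);
}
#endif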
3480
3481/**
3482 * Enters the CPU shutdown state initiated by a triple fault or other
3483 * unrecoverable conditions.
3484 *
3485 * @returns Strict VBox status code.
3486 * @param pVCpu The cross context virtual CPU structure of the
3487 * calling thread.
3488 */
3489IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3490{
3491 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3492 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3493
3494 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3495 {
3496 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3497 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3498 }
3499
3500 RT_NOREF(pVCpu);
3501 return VINF_EM_TRIPLE_FAULT;
3502}
3503
3504
3505/**
3506 * Validates a new SS segment.
3507 *
3508 * @returns VBox strict status code.
3509 * @param pVCpu The cross context virtual CPU structure of the
3510 * calling thread.
3511 * @param NewSS The new SS selector.
3512 * @param uCpl The CPL to load the stack for.
3513 * @param pDesc Where to return the descriptor.
3514 */
3515IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3516{
3517 /* Null selectors are not allowed (we're not called for dispatching
3518 interrupts with SS=0 in long mode). */
3519 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3520 {
3521 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3522 return iemRaiseTaskSwitchFault0(pVCpu);
3523 }
3524
3525 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3526 if ((NewSS & X86_SEL_RPL) != uCpl)
3527 {
3528 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3529 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3530 }
3531
3532 /*
3533 * Read the descriptor.
3534 */
3535 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3536 if (rcStrict != VINF_SUCCESS)
3537 return rcStrict;
3538
3539 /*
3540 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3541 */
3542 if (!pDesc->Legacy.Gen.u1DescType)
3543 {
3544 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3545 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3546 }
3547
3548 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3549 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3550 {
3551 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3552 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3553 }
3554 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3555 {
3556 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3557 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3558 }
3559
3560 /* Is it there? */
3561 /** @todo testcase: Is this checked before the canonical / limit check below? */
3562 if (!pDesc->Legacy.Gen.u1Present)
3563 {
3564 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3565 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3566 }
3567
3568 return VINF_SUCCESS;
3569}
3570
3571
3572/**
3573 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3574 * not.
3575 *
3576 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3577 */
3578#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3579# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3580#else
3581# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3582#endif
3583
3584/**
3585 * Updates the EFLAGS in the correct manner wrt. PATM.
3586 *
3587 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3588 * @param a_fEfl The new EFLAGS.
3589 */
3590#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3591# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3592#else
3593# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3594#endif
3595
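/*
 * Illustrative sketch, not part of the original source: the usual read-modify-write pattern
 * with the wrappers above; in raw-mode builds this goes through PATM so that patched guest
 * code sees consistent flags, otherwise it is a plain eflags.u access.
 */
#if 0 /* example only, not built */
static void iemExampleMaskIfTf(PVMCPU pVCpu)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    fEfl &= ~(X86_EFL_IF | X86_EFL_TF);     /* e.g. what real-mode event delivery does below */
    IEMMISC_SET_EFL(pVCpu, fEfl);
}
#endif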
3596
3597/** @} */
3598
3599/** @name Raising Exceptions.
3600 *
3601 * @{
3602 */
3603
3604
3605/**
3606 * Loads the specified stack far pointer from the TSS.
3607 *
3608 * @returns VBox strict status code.
3609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3610 * @param uCpl The CPL to load the stack for.
3611 * @param pSelSS Where to return the new stack segment.
3612 * @param puEsp Where to return the new stack pointer.
3613 */
3614IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3615{
3616 VBOXSTRICTRC rcStrict;
3617 Assert(uCpl < 4);
3618
3619 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3620 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3621 {
3622 /*
3623 * 16-bit TSS (X86TSS16).
3624 */
3625 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3626 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3627 {
3628 uint32_t off = uCpl * 4 + 2;
3629 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3630 {
3631 /** @todo check actual access pattern here. */
3632 uint32_t u32Tmp = 0; /* gcc maybe... */
3633 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3634 if (rcStrict == VINF_SUCCESS)
3635 {
3636 *puEsp = RT_LOWORD(u32Tmp);
3637 *pSelSS = RT_HIWORD(u32Tmp);
3638 return VINF_SUCCESS;
3639 }
3640 }
3641 else
3642 {
3643 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3644 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3645 }
3646 break;
3647 }
3648
3649 /*
3650 * 32-bit TSS (X86TSS32).
3651 */
3652 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3653 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3654 {
3655 uint32_t off = uCpl * 8 + 4;
3656 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3657 {
3658/** @todo check actual access pattern here. */
3659 uint64_t u64Tmp;
3660 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3661 if (rcStrict == VINF_SUCCESS)
3662 {
3663 *puEsp = u64Tmp & UINT32_MAX;
3664 *pSelSS = (RTSEL)(u64Tmp >> 32);
3665 return VINF_SUCCESS;
3666 }
3667 }
3668 else
3669 {
3670 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3671 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3672 }
3673 break;
3674 }
3675
3676 default:
3677 AssertFailed();
3678 rcStrict = VERR_IEM_IPE_4;
3679 break;
3680 }
3681
3682 *puEsp = 0; /* make gcc happy */
3683 *pSelSS = 0; /* make gcc happy */
3684 return rcStrict;
3685}
3686
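/*
 * Illustrative sketch, not part of the original source: the offsets used above simply index
 * the per-privilege stack slots of the legacy TSS images, i.e. SP:SS word pairs starting at
 * byte 2 in a 16-bit TSS and ESP/SS dword pairs starting at byte 4 in a 32-bit TSS.
 */
#if 0 /* example only, not built */
static void iemExampleLegacyTssStackOffsets(void)
{
    uint8_t const uCpl = 1;
    Assert(uCpl * 4 + 2 == 6);      /* 16-bit TSS: ring-1 SP at byte 6, SS at byte 8 */
    Assert(uCpl * 8 + 4 == 12);     /* 32-bit TSS: ring-1 ESP at byte 12, SS at byte 16 */
}
#endif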
3687
3688/**
3689 * Loads the specified stack pointer from the 64-bit TSS.
3690 *
3691 * @returns VBox strict status code.
3692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3693 * @param uCpl The CPL to load the stack for.
3694 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3695 * @param puRsp Where to return the new stack pointer.
3696 */
3697IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3698{
3699 Assert(uCpl < 4);
3700 Assert(uIst < 8);
3701 *puRsp = 0; /* make gcc happy */
3702
3703 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3704 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3705
3706 uint32_t off;
3707 if (uIst)
3708 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3709 else
3710 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3711 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3712 {
3713 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3714 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3715 }
3716
3717 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3718}
3719
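/*
 * Illustrative sketch, not part of the original source, assuming X86TSS64 mirrors the
 * architectural 64-bit TSS layout (4 reserved bytes, RSP0..RSP2, 8 reserved bytes, then
 * IST1..IST7): the offset calculation above lands on the expected slots.
 */
#if 0 /* example only, not built */
static void iemExampleTss64Offsets(void)
{
    uint32_t const offRsp2 = 2 /*uCpl*/ * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
    uint32_t const offIst3 = (3 /*uIst*/ - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
    Assert(offRsp2 == 0x14);    /* RSP0 at 0x04, so RSP2 at 0x14 */
    Assert(offIst3 == 0x34);    /* IST1 at 0x24, so IST3 at 0x34 */
}
#endif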
3720
3721/**
3722 * Adjust the CPU state according to the exception being raised.
3723 *
3724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3725 * @param u8Vector The exception that has been raised.
3726 */
3727DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3728{
3729 switch (u8Vector)
3730 {
3731 case X86_XCPT_DB:
3732 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3733 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3734 break;
3735 /** @todo Read the AMD and Intel exception reference... */
3736 }
3737}
3738
3739
3740/**
3741 * Implements exceptions and interrupts for real mode.
3742 *
3743 * @returns VBox strict status code.
3744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3745 * @param cbInstr The number of bytes to offset rIP by in the return
3746 * address.
3747 * @param u8Vector The interrupt / exception vector number.
3748 * @param fFlags The flags.
3749 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3750 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3751 */
3752IEM_STATIC VBOXSTRICTRC
3753iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3754 uint8_t cbInstr,
3755 uint8_t u8Vector,
3756 uint32_t fFlags,
3757 uint16_t uErr,
3758 uint64_t uCr2)
3759{
3760 NOREF(uErr); NOREF(uCr2);
3761 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3762
3763 /*
3764 * Read the IDT entry.
3765 */
3766 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3767 {
3768 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3769 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3770 }
3771 RTFAR16 Idte;
3772 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3774 {
3775 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3776 return rcStrict;
3777 }
3778
3779 /*
3780 * Push the stack frame.
3781 */
3782 uint16_t *pu16Frame;
3783 uint64_t uNewRsp;
3784 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3789#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3790 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3791 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3792 fEfl |= UINT16_C(0xf000);
3793#endif
3794 pu16Frame[2] = (uint16_t)fEfl;
3795 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3796 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3797 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3798 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3799 return rcStrict;
3800
3801 /*
3802 * Load the vector address into cs:ip and make exception specific state
3803 * adjustments.
3804 */
3805 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3806 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3807 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3808 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3809 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3810 pVCpu->cpum.GstCtx.rip = Idte.off;
3811 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3812 IEMMISC_SET_EFL(pVCpu, fEfl);
3813
3814 /** @todo do we actually do this in real mode? */
3815 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3816 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3817
3818 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3819}
3820
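/*
 * Illustrative sketch, not part of the original source: the real-mode IVT layout the code
 * above relies on.  Each vector is a 4-byte far pointer (IP in the low word, CS in the high
 * word), and the frame pushed for the event is 6 bytes: FLAGS, CS, IP.
 */
#if 0 /* example only, not built */
static void iemExampleRealModeIvt(void)
{
    uint8_t const  u8Vector = 0x10;                     /* e.g. the video BIOS vector */
    uint32_t const offIdte  = UINT32_C(4) * u8Vector;   /* entry at bytes 0x40..0x43 of the IVT */
    Assert(offIdte == 0x40);
    Assert(3 * sizeof(uint16_t) == 6);                  /* FLAGS, CS, IP pushed above */
}
#endif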
3821
3822/**
3823 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3824 *
3825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3826 * @param pSReg Pointer to the segment register.
3827 */
3828IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3829{
3830 pSReg->Sel = 0;
3831 pSReg->ValidSel = 0;
3832 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3833 {
3834 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3835 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3836 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3837 }
3838 else
3839 {
3840 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3841 /** @todo check this on AMD-V */
3842 pSReg->u64Base = 0;
3843 pSReg->u32Limit = 0;
3844 }
3845}
3846
3847
3848/**
3849 * Loads a segment selector during a task switch in V8086 mode.
3850 *
3851 * @param pSReg Pointer to the segment register.
3852 * @param uSel The selector value to load.
3853 */
3854IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3855{
3856 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3857 pSReg->Sel = uSel;
3858 pSReg->ValidSel = uSel;
3859 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3860 pSReg->u64Base = uSel << 4;
3861 pSReg->u32Limit = 0xffff;
3862 pSReg->Attr.u = 0xf3;
3863}
3864
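/*
 * Illustrative sketch, not part of the original source: in virtual-8086 mode the hidden base
 * is always the selector shifted left by four, the limit is 64 KiB - 1, and the attributes
 * describe a present, DPL-3, read/write data segment (0xf3), exactly as the helper above
 * sets them.
 */
#if 0 /* example only, not built */
static void iemExampleV86SegmentBase(void)
{
    uint16_t const uSel  = 0xb800;                      /* the classic text-mode video segment */
    uint32_t const uBase = (uint32_t)uSel << 4;
    Assert(uBase == UINT32_C(0xb8000));
}
#endif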
3865
3866/**
3867 * Loads a NULL data selector into a selector register, both the hidden and
3868 * visible parts, in protected mode.
3869 *
3870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3871 * @param pSReg Pointer to the segment register.
3872 * @param uRpl The RPL.
3873 */
3874IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3875{
3876 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3877 * data selector in protected mode. */
3878 pSReg->Sel = uRpl;
3879 pSReg->ValidSel = uRpl;
3880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3882 {
3883 /* VT-x (Intel 3960x) observed doing something like this. */
3884 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3885 pSReg->u32Limit = UINT32_MAX;
3886 pSReg->u64Base = 0;
3887 }
3888 else
3889 {
3890 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3891 pSReg->u32Limit = 0;
3892 pSReg->u64Base = 0;
3893 }
3894}
3895
3896
3897/**
3898 * Loads a segment selector during a task switch in protected mode.
3899 *
3900 * In this task switch scenario, we would throw \#TS exceptions rather than
3901 * \#GPs.
3902 *
3903 * @returns VBox strict status code.
3904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3905 * @param pSReg Pointer to the segment register.
3906 * @param uSel The new selector value.
3907 *
3908 * @remarks This does _not_ handle CS or SS.
3909 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3910 */
3911IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3912{
3913 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3914
3915 /* Null data selector. */
3916 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3917 {
3918 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3920 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3921 return VINF_SUCCESS;
3922 }
3923
3924 /* Fetch the descriptor. */
3925 IEMSELDESC Desc;
3926 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3927 if (rcStrict != VINF_SUCCESS)
3928 {
3929 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3930 VBOXSTRICTRC_VAL(rcStrict)));
3931 return rcStrict;
3932 }
3933
3934 /* Must be a data segment or readable code segment. */
3935 if ( !Desc.Legacy.Gen.u1DescType
3936 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3937 {
3938 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3939 Desc.Legacy.Gen.u4Type));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /* Check privileges for data segments and non-conforming code segments. */
3944 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3945 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3946 {
3947 /* The RPL and the new CPL must be less than or equal to the DPL. */
3948 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3949 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3950 {
3951 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3952 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3953 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3954 }
3955 }
3956
3957 /* Is it there? */
3958 if (!Desc.Legacy.Gen.u1Present)
3959 {
3960 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3961 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3962 }
3963
3964 /* The base and limit. */
3965 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3966 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3967
3968 /*
3969 * Ok, everything checked out fine. Now set the accessed bit before
3970 * committing the result into the registers.
3971 */
3972 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3973 {
3974 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3975 if (rcStrict != VINF_SUCCESS)
3976 return rcStrict;
3977 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3978 }
3979
3980 /* Commit */
3981 pSReg->Sel = uSel;
3982 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3983 pSReg->u32Limit = cbLimit;
3984 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3985 pSReg->ValidSel = uSel;
3986 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3987 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3988 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3989
3990 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3991 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3992 return VINF_SUCCESS;
3993}
3994
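/*
 * Illustrative sketch, not part of the original source: the privilege rule enforced for data
 * segments and non-conforming code segments above, i.e. both the selector RPL and the current
 * CPL must be numerically less than or equal to the descriptor DPL.  The helper name is
 * hypothetical.
 */
#if 0 /* example only, not built */
static bool iemExampleDataSegPrivOk(uint16_t uSel, uint8_t uDpl, uint8_t uCpl)
{
    return (uSel & X86_SEL_RPL) <= uDpl
        && uCpl                 <= uDpl;
}
#endif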
3995
3996/**
3997 * Performs a task switch.
3998 *
3999 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4000 * caller is responsible for performing the necessary checks (like DPL, TSS
4001 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4002 * reference for JMP, CALL, IRET.
4003 *
4004 * If the task switch is due to a software interrupt or a hardware exception,
4005 * the caller is responsible for validating the TSS selector and descriptor. See
4006 * Intel Instruction reference for INT n.
4007 *
4008 * @returns VBox strict status code.
4009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4010 * @param enmTaskSwitch The cause of the task switch.
4011 * @param uNextEip The EIP effective after the task switch.
4012 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4013 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4014 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4015 * @param SelTSS The TSS selector of the new task.
4016 * @param pNewDescTSS Pointer to the new TSS descriptor.
4017 */
4018IEM_STATIC VBOXSTRICTRC
4019iemTaskSwitch(PVMCPU pVCpu,
4020 IEMTASKSWITCH enmTaskSwitch,
4021 uint32_t uNextEip,
4022 uint32_t fFlags,
4023 uint16_t uErr,
4024 uint64_t uCr2,
4025 RTSEL SelTSS,
4026 PIEMSELDESC pNewDescTSS)
4027{
4028 Assert(!IEM_IS_REAL_MODE(pVCpu));
4029 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4030 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4031
4032 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4034 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4035 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4036 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4037
4038 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4039 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4040
4041 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4042 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4043
4044 /* Update CR2 in case it's a page-fault. */
4045 /** @todo This should probably be done much earlier in IEM/PGM. See
4046 * @bugref{5653#c49}. */
4047 if (fFlags & IEM_XCPT_FLAGS_CR2)
4048 pVCpu->cpum.GstCtx.cr2 = uCr2;
4049
4050 /*
4051 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4052 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4053 */
4054 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4055 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4056 if (uNewTSSLimit < uNewTSSLimitMin)
4057 {
4058 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4059 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4060 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4061 }
4062
4063 /*
4064 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4065 * The new TSS must have been read and validated (DPL, limits etc.) before a
4066 * task-switch VM-exit commences.
4067 *
4068 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4069 */
4070 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4071 {
4072 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4073 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4074 }
4075
4076 /*
4077 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4078 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4079 */
4080 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4081 {
4082 uint32_t const uExitInfo1 = SelTSS;
4083 uint32_t uExitInfo2 = uErr;
4084 switch (enmTaskSwitch)
4085 {
4086 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4087 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4088 default: break;
4089 }
4090 if (fFlags & IEM_XCPT_FLAGS_ERR)
4091 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4092 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4093 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4094
4095 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4096 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4097 RT_NOREF2(uExitInfo1, uExitInfo2);
4098 }
4099
4100 /*
4101 * Check the current TSS limit. The last write to the current TSS during the
4102 * task switch is 2 bytes at offset 0x5C (32-bit) or 1 byte at offset 0x28 (16-bit).
4103 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4104 *
4105 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4106 * end up with smaller than "legal" TSS limits.
4107 */
4108 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4109 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4110 if (uCurTSSLimit < uCurTSSLimitMin)
4111 {
4112 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4113 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4114 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4115 }
4116
4117 /*
4118 * Verify that the new TSS can be accessed and map it. Map only the required contents
4119 * and not the entire TSS.
4120 */
4121 void *pvNewTSS;
4122 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4123 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4124 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4125 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4126 * not perform correct translation if this happens. See Intel spec. 7.2.1
4127 * "Task-State Segment" */
4128 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4132 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135
4136 /*
4137 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4138 */
4139 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4140 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4141 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4142 {
4143 PX86DESC pDescCurTSS;
4144 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4145 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4149 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152
4153 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4154 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4155 if (rcStrict != VINF_SUCCESS)
4156 {
4157 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4158 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4159 return rcStrict;
4160 }
4161
4162 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4163 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4164 {
4165 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4166 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4167 u32EFlags &= ~X86_EFL_NT;
4168 }
4169 }
4170
4171 /*
4172 * Save the CPU state into the current TSS.
4173 */
4174 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4175 if (GCPtrNewTSS == GCPtrCurTSS)
4176 {
4177 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4178 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4179 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4180 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4181 pVCpu->cpum.GstCtx.ldtr.Sel));
4182 }
4183 if (fIsNewTSS386)
4184 {
4185 /*
4186 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4187 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4188 */
4189 void *pvCurTSS32;
4190 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4191 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4192 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4193 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4194 if (rcStrict != VINF_SUCCESS)
4195 {
4196 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4197 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4198 return rcStrict;
4199 }
4200
4201 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4202 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4203 pCurTSS32->eip = uNextEip;
4204 pCurTSS32->eflags = u32EFlags;
4205 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4206 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4207 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4208 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4209 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4210 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4211 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4212 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4213 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4214 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4215 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4216 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4217 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4218 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4219
4220 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4221 if (rcStrict != VINF_SUCCESS)
4222 {
4223 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4224 VBOXSTRICTRC_VAL(rcStrict)));
4225 return rcStrict;
4226 }
4227 }
4228 else
4229 {
4230 /*
4231 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4232 */
4233 void *pvCurTSS16;
4234 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4235 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4236 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4237 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4241 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4246 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4247 pCurTSS16->ip = uNextEip;
4248 pCurTSS16->flags = u32EFlags;
4249 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4250 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4251 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4252 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4253 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4254 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4255 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4256 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4257 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4258 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4259 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4260 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4261
4262 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4263 if (rcStrict != VINF_SUCCESS)
4264 {
4265 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4266 VBOXSTRICTRC_VAL(rcStrict)));
4267 return rcStrict;
4268 }
4269 }
4270
4271 /*
4272 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4273 */
4274 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4275 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4276 {
4277 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4278 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4279 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4280 }
4281
4282 /*
4283 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4284 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4285 */
4286 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4287 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4288 bool fNewDebugTrap;
4289 if (fIsNewTSS386)
4290 {
4291 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4292 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4293 uNewEip = pNewTSS32->eip;
4294 uNewEflags = pNewTSS32->eflags;
4295 uNewEax = pNewTSS32->eax;
4296 uNewEcx = pNewTSS32->ecx;
4297 uNewEdx = pNewTSS32->edx;
4298 uNewEbx = pNewTSS32->ebx;
4299 uNewEsp = pNewTSS32->esp;
4300 uNewEbp = pNewTSS32->ebp;
4301 uNewEsi = pNewTSS32->esi;
4302 uNewEdi = pNewTSS32->edi;
4303 uNewES = pNewTSS32->es;
4304 uNewCS = pNewTSS32->cs;
4305 uNewSS = pNewTSS32->ss;
4306 uNewDS = pNewTSS32->ds;
4307 uNewFS = pNewTSS32->fs;
4308 uNewGS = pNewTSS32->gs;
4309 uNewLdt = pNewTSS32->selLdt;
4310 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4311 }
4312 else
4313 {
4314 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4315 uNewCr3 = 0;
4316 uNewEip = pNewTSS16->ip;
4317 uNewEflags = pNewTSS16->flags;
4318 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4319 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4320 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4321 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4322 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4323 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4324 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4325 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4326 uNewES = pNewTSS16->es;
4327 uNewCS = pNewTSS16->cs;
4328 uNewSS = pNewTSS16->ss;
4329 uNewDS = pNewTSS16->ds;
4330 uNewFS = 0;
4331 uNewGS = 0;
4332 uNewLdt = pNewTSS16->selLdt;
4333 fNewDebugTrap = false;
4334 }
4335
4336 if (GCPtrNewTSS == GCPtrCurTSS)
4337 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4338 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4339
4340 /*
4341 * We're done accessing the new TSS.
4342 */
4343 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4347 return rcStrict;
4348 }
4349
4350 /*
4351 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4352 */
4353 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4354 {
4355 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4356 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4357 if (rcStrict != VINF_SUCCESS)
4358 {
4359 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4360 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4361 return rcStrict;
4362 }
4363
4364 /* Check that the descriptor indicates the new TSS is available (not busy). */
4365 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4366 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4367 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4368
4369 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4370 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4371 if (rcStrict != VINF_SUCCESS)
4372 {
4373 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4374 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4375 return rcStrict;
4376 }
4377 }
4378
4379 /*
4380 * From this point on, we're technically in the new task. Exceptions raised here are
4381 * deferred and delivered after the task switch completes but before any instruction in the new task executes.
4382 */
4383 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4384 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4385 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4386 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4387 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4388 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4389 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4390
4391 /* Set the busy bit in TR. */
4392 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4393 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4394 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4395 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4396 {
4397 uNewEflags |= X86_EFL_NT;
4398 }
4399
4400 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4401 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4402 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4403
4404 pVCpu->cpum.GstCtx.eip = uNewEip;
4405 pVCpu->cpum.GstCtx.eax = uNewEax;
4406 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4407 pVCpu->cpum.GstCtx.edx = uNewEdx;
4408 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4409 pVCpu->cpum.GstCtx.esp = uNewEsp;
4410 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4411 pVCpu->cpum.GstCtx.esi = uNewEsi;
4412 pVCpu->cpum.GstCtx.edi = uNewEdi;
4413
4414 uNewEflags &= X86_EFL_LIVE_MASK;
4415 uNewEflags |= X86_EFL_RA1_MASK;
4416 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4417
4418 /*
4419 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4420 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4421 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4422 */
4423 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4424 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4427 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4430 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4433 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4436 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4437
4438 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4439 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4441
4442 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4443 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4444 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4445 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4446
4447 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4448 {
4449 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4454 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4455 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4456 }
4457
4458 /*
4459 * Switch CR3 for the new task.
4460 */
4461 if ( fIsNewTSS386
4462 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4463 {
4464 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4465 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4466 AssertRCSuccessReturn(rc, rc);
4467
4468 /* Inform PGM. */
4469 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4470 AssertRCReturn(rc, rc);
4471 /* ignore informational status codes */
4472
4473 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4474 }
4475
4476 /*
4477 * Switch LDTR for the new task.
4478 */
4479 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4480 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4481 else
4482 {
4483 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4484
4485 IEMSELDESC DescNewLdt;
4486 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4487 if (rcStrict != VINF_SUCCESS)
4488 {
4489 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4490 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4491 return rcStrict;
4492 }
4493 if ( !DescNewLdt.Legacy.Gen.u1Present
4494 || DescNewLdt.Legacy.Gen.u1DescType
4495 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4496 {
4497 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4498 uNewLdt, DescNewLdt.Legacy.u));
4499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4500 }
4501
4502 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4503 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4504 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4505 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4506 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4507 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4508 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4510 }
4511
4512 IEMSELDESC DescSS;
4513 if (IEM_IS_V86_MODE(pVCpu))
4514 {
4515 pVCpu->iem.s.uCpl = 3;
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4520 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4521 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4522
4523 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4524 DescSS.Legacy.u = 0;
4525 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4526 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4527 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4528 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4529 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4530 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4531 DescSS.Legacy.Gen.u2Dpl = 3;
4532 }
4533 else
4534 {
4535 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4536
4537 /*
4538 * Load the stack segment for the new task.
4539 */
4540 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4541 {
4542 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* Fetch the descriptor. */
4547 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4548 if (rcStrict != VINF_SUCCESS)
4549 {
4550 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4551 VBOXSTRICTRC_VAL(rcStrict)));
4552 return rcStrict;
4553 }
4554
4555 /* SS must be a data segment and writable. */
4556 if ( !DescSS.Legacy.Gen.u1DescType
4557 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4558 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4559 {
4560 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4561 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4566 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4567 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4568 {
4569 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4570 uNewCpl));
4571 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 /* Is it there? */
4575 if (!DescSS.Legacy.Gen.u1Present)
4576 {
4577 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4578 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4579 }
4580
4581 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4582 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4583
4584 /* Set the accessed bit before committing the result into SS. */
4585 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4586 {
4587 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4588 if (rcStrict != VINF_SUCCESS)
4589 return rcStrict;
4590 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4591 }
4592
4593 /* Commit SS. */
4594 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4595 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4596 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4597 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4598 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4599 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4600 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4601
4602 /* CPL has changed, update IEM before loading rest of segments. */
4603 pVCpu->iem.s.uCpl = uNewCpl;
4604
4605 /*
4606 * Load the data segments for the new task.
4607 */
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4618 if (rcStrict != VINF_SUCCESS)
4619 return rcStrict;
4620
4621 /*
4622 * Load the code segment for the new task.
4623 */
4624 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4625 {
4626 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4627 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4628 }
4629
4630 /* Fetch the descriptor. */
4631 IEMSELDESC DescCS;
4632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4633 if (rcStrict != VINF_SUCCESS)
4634 {
4635 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4636 return rcStrict;
4637 }
4638
4639 /* CS must be a code segment. */
4640 if ( !DescCS.Legacy.Gen.u1DescType
4641 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4642 {
4643 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4644 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4646 }
4647
4648 /* For conforming CS, DPL must be less than or equal to the RPL. */
4649 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4650 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4651 {
4652 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4653 DescCS.Legacy.Gen.u2Dpl));
4654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4655 }
4656
4657 /* For non-conforming CS, DPL must match RPL. */
4658 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4659 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4660 {
4661 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4662 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4664 }
4665
4666 /* Is it there? */
4667 if (!DescCS.Legacy.Gen.u1Present)
4668 {
4669 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4670 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4671 }
4672
4673 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4674 u64Base = X86DESC_BASE(&DescCS.Legacy);
4675
4676 /* Set the accessed bit before committing the result into CS. */
4677 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4678 {
4679 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4680 if (rcStrict != VINF_SUCCESS)
4681 return rcStrict;
4682 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4683 }
4684
4685 /* Commit CS. */
4686 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4687 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4688 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4689 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4690 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4691 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4693 }
4694
4695 /** @todo Debug trap. */
4696 if (fIsNewTSS386 && fNewDebugTrap)
4697 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4698
4699 /*
4700 * Construct the error code masks based on what caused this task switch.
4701 * See Intel Instruction reference for INT.
4702 */
4703 uint16_t uExt;
4704 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4705 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4706 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4707 {
4708 uExt = 1;
4709 }
4710 else
4711 uExt = 0;
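/* Note: uExt ends up as bit 0 (the EXT bit) of any error code raised while
   completing the switch below (#SS/#GP), i.e. the fault is flagged as caused
   by an externally delivered event rather than by a software INT n/INT3/INTO. */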
4712
4713 /*
4714 * Push any error code on to the new stack.
4715 */
4716 if (fFlags & IEM_XCPT_FLAGS_ERR)
4717 {
4718 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4719 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4720 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4721
4722 /* Check that there is sufficient space on the stack. */
4723 /** @todo Factor out segment limit checking for normal/expand down segments
4724 * into a separate function. */
4725 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4726 {
4727 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4728 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4729 {
4730 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4731 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4732 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4733 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4734 }
4735 }
4736 else
4737 {
4738 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4739 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4740 {
4741 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4742 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4743 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4744 }
4745 }
4746
4747
4748 if (fIsNewTSS386)
4749 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4750 else
4751 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4752 if (rcStrict != VINF_SUCCESS)
4753 {
4754 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4755 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4756 return rcStrict;
4757 }
4758 }
4759
4760 /* Check the new EIP against the new CS limit. */
4761 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4762 {
4763 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4764 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4765 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4766 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4767 }
4768
4769 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4770 pVCpu->cpum.GstCtx.ss.Sel));
4771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4772}
4773
4774
4775/**
4776 * Implements exceptions and interrupts for protected mode.
4777 *
4778 * @returns VBox strict status code.
4779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4780 * @param cbInstr The number of bytes to offset rIP by in the return
4781 * address.
4782 * @param u8Vector The interrupt / exception vector number.
4783 * @param fFlags The flags.
4784 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4785 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4786 */
4787IEM_STATIC VBOXSTRICTRC
4788iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4789 uint8_t cbInstr,
4790 uint8_t u8Vector,
4791 uint32_t fFlags,
4792 uint16_t uErr,
4793 uint64_t uCr2)
4794{
4795 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4796
4797 /*
4798 * Read the IDT entry.
4799 */
4800 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4801 {
4802 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4803 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4804 }
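/* Protected-mode IDT entries are 8 bytes each, so the gate for u8Vector sits
   at offset 8 * u8Vector and must fit entirely within the IDT limit. */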
4805 X86DESC Idte;
4806 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4807 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4808 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4809 {
4810 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4811 return rcStrict;
4812 }
4813 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4814 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4815 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4816
4817 /*
4818 * Check the descriptor type, DPL and such.
4819 * ASSUMES this is done in the same order as described for call-gate calls.
4820 */
4821 if (Idte.Gate.u1DescType)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4824 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4825 }
4826 bool fTaskGate = false;
4827 uint8_t f32BitGate = true;
4828 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4829 switch (Idte.Gate.u4Type)
4830 {
4831 case X86_SEL_TYPE_SYS_UNDEFINED:
4832 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4833 case X86_SEL_TYPE_SYS_LDT:
4834 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4835 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4836 case X86_SEL_TYPE_SYS_UNDEFINED2:
4837 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4838 case X86_SEL_TYPE_SYS_UNDEFINED3:
4839 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4840 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4841 case X86_SEL_TYPE_SYS_UNDEFINED4:
4842 {
4843 /** @todo check what actually happens when the type is wrong...
4844 * esp. call gates. */
4845 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4846 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4847 }
4848
4849 case X86_SEL_TYPE_SYS_286_INT_GATE:
4850 f32BitGate = false;
4851 RT_FALL_THRU();
4852 case X86_SEL_TYPE_SYS_386_INT_GATE:
4853 fEflToClear |= X86_EFL_IF;
4854 break;
4855
4856 case X86_SEL_TYPE_SYS_TASK_GATE:
4857 fTaskGate = true;
4858#ifndef IEM_IMPLEMENTS_TASKSWITCH
4859 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4860#endif
4861 break;
4862
4863 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4864 f32BitGate = false;
RT_FALL_THRU();
4865 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4866 break;
4867
4868 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4869 }
4870
4871 /* Check DPL against CPL if applicable. */
4872 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4873 {
4874 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4875 {
4876 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4877 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4878 }
4879 }
4880
4881 /* Is it there? */
4882 if (!Idte.Gate.u1Present)
4883 {
4884 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4885 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4886 }
4887
4888 /* Is it a task-gate? */
4889 if (fTaskGate)
4890 {
4891 /*
4892 * Construct the error code masks based on what caused this task switch.
4893 * See Intel Instruction reference for INT.
4894 */
4895 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4896 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4897 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4898 RTSEL SelTSS = Idte.Gate.u16Sel;
4899
4900 /*
4901 * Fetch the TSS descriptor in the GDT.
4902 */
4903 IEMSELDESC DescTSS;
4904 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4905 if (rcStrict != VINF_SUCCESS)
4906 {
4907 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4908 VBOXSTRICTRC_VAL(rcStrict)));
4909 return rcStrict;
4910 }
4911
4912 /* The TSS descriptor must be a system segment and be available (not busy). */
4913 if ( DescTSS.Legacy.Gen.u1DescType
4914 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4915 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4916 {
4917 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4918 u8Vector, SelTSS, DescTSS.Legacy.au64));
4919 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4920 }
4921
4922 /* The TSS must be present. */
4923 if (!DescTSS.Legacy.Gen.u1Present)
4924 {
4925 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4926 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4927 }
4928
4929 /* Do the actual task switch. */
4930 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4931 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4932 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4933 }
4934
4935 /* A null CS is bad. */
4936 RTSEL NewCS = Idte.Gate.u16Sel;
4937 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4938 {
4939 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4940 return iemRaiseGeneralProtectionFault0(pVCpu);
4941 }
4942
4943 /* Fetch the descriptor for the new CS. */
4944 IEMSELDESC DescCS;
4945 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4946 if (rcStrict != VINF_SUCCESS)
4947 {
4948 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4949 return rcStrict;
4950 }
4951
4952 /* Must be a code segment. */
4953 if (!DescCS.Legacy.Gen.u1DescType)
4954 {
4955 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4956 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4957 }
4958 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4959 {
4960 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4961 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4962 }
4963
4964 /* Don't allow lowering the privilege level. */
4965 /** @todo Does the lowering of privileges apply to software interrupts
4966 * only? This has bearings on the more-privileged or
4967 * same-privilege stack behavior further down. A testcase would
4968 * be nice. */
4969 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4970 {
4971 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4972 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4973 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4974 }
4975
4976 /* Make sure the selector is present. */
4977 if (!DescCS.Legacy.Gen.u1Present)
4978 {
4979 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4980 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4981 }
4982
4983 /* Check the new EIP against the new CS limit. */
4984 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4985 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4986 ? Idte.Gate.u16OffsetLow
4987 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
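/* A 286 (16-bit) gate only carries a 16-bit offset; a 386 gate combines the
   low and high offset words into a full 32-bit entry point. */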
4988 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4989 if (uNewEip > cbLimitCS)
4990 {
4991 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4992 u8Vector, uNewEip, cbLimitCS, NewCS));
4993 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4994 }
4995 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4996
4997 /* Calc the flag image to push. */
4998 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4999 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5000 fEfl &= ~X86_EFL_RF;
5001 else
5002 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5003
5004 /* From V8086 mode only go to CPL 0. */
5005 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5006 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5007 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5008 {
5009 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5010 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5011 }
5012
5013 /*
5014 * If the privilege level changes, we need to get a new stack from the TSS.
5015 * This in turns means validating the new SS and ESP...
5016 */
5017 if (uNewCpl != pVCpu->iem.s.uCpl)
5018 {
5019 RTSEL NewSS;
5020 uint32_t uNewEsp;
5021 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5022 if (rcStrict != VINF_SUCCESS)
5023 return rcStrict;
5024
5025 IEMSELDESC DescSS;
5026 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5027 if (rcStrict != VINF_SUCCESS)
5028 return rcStrict;
5029 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5030 if (!DescSS.Legacy.Gen.u1DefBig)
5031 {
5032 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5033 uNewEsp = (uint16_t)uNewEsp;
5034 }
5035
5036 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5037
5038 /* Check that there is sufficient space for the stack frame. */
5039 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5040 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5041 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5042 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
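/* Frame slots: EIP, CS, EFLAGS, old ESP, old SS (5), +1 for an error code;
   a V8086 interruption additionally pushes ES, DS, FS and GS (9 or 10 slots).
   Each slot is 2 bytes for a 16-bit gate, doubled by the f32BitGate shift. */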
5043
5044 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5045 {
5046 if ( uNewEsp - 1 > cbLimitSS
5047 || uNewEsp < cbStackFrame)
5048 {
5049 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5050 u8Vector, NewSS, uNewEsp, cbStackFrame));
5051 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5052 }
5053 }
5054 else
5055 {
5056 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5057 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5058 {
5059 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5060 u8Vector, NewSS, uNewEsp, cbStackFrame));
5061 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5062 }
5063 }
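/* (For expand-down stack segments the valid ESP range is (limit, 0xFFFF] or
   (limit, 0xFFFFFFFF] depending on the D/B bit, hence the inverted checks.) */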
5064
5065 /*
5066 * Start making changes.
5067 */
5068
5069 /* Set the new CPL so that stack accesses use it. */
5070 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5071 pVCpu->iem.s.uCpl = uNewCpl;
5072
5073 /* Create the stack frame. */
5074 RTPTRUNION uStackFrame;
5075 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5076 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5077 if (rcStrict != VINF_SUCCESS)
5078 return rcStrict;
5079 void * const pvStackFrame = uStackFrame.pv;
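/* The frame is written from the lowest address upwards, matching the hardware
   push order: [error code,] EIP, CS, EFLAGS, old ESP, old SS and, when
   interrupting V8086 code, also the old ES, DS, FS and GS selectors. */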
5080 if (f32BitGate)
5081 {
5082 if (fFlags & IEM_XCPT_FLAGS_ERR)
5083 *uStackFrame.pu32++ = uErr;
5084 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5085 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5086 uStackFrame.pu32[2] = fEfl;
5087 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5088 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5089 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5090 if (fEfl & X86_EFL_VM)
5091 {
5092 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5093 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5094 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5095 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5096 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5097 }
5098 }
5099 else
5100 {
5101 if (fFlags & IEM_XCPT_FLAGS_ERR)
5102 *uStackFrame.pu16++ = uErr;
5103 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5104 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5105 uStackFrame.pu16[2] = fEfl;
5106 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5107 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5108 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5109 if (fEfl & X86_EFL_VM)
5110 {
5111 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5112 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5113 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5114 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5115 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5116 }
5117 }
5118 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5119 if (rcStrict != VINF_SUCCESS)
5120 return rcStrict;
5121
5122 /* Mark the selectors 'accessed' (hope this is the correct time). */
5123 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5124 * after pushing the stack frame? (Write protect the gdt + stack to
5125 * find out.) */
5126 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5127 {
5128 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5129 if (rcStrict != VINF_SUCCESS)
5130 return rcStrict;
5131 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5132 }
5133
5134 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5135 {
5136 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5137 if (rcStrict != VINF_SUCCESS)
5138 return rcStrict;
5139 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5140 }
5141
5142 /*
5143 * Start committing the register changes (joins with the DPL=CPL branch).
5144 */
5145 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5146 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5147 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5148 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5149 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5150 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5151 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5152 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5153 * SP is loaded).
5154 * Need to check the other combinations too:
5155 * - 16-bit TSS, 32-bit handler
5156 * - 32-bit TSS, 16-bit handler */
5157 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5158 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5159 else
5160 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5161
5162 if (fEfl & X86_EFL_VM)
5163 {
5164 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5165 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5166 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5167 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5168 }
5169 }
5170 /*
5171 * Same privilege, no stack change and smaller stack frame.
5172 */
5173 else
5174 {
5175 uint64_t uNewRsp;
5176 RTPTRUNION uStackFrame;
5177 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
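/* Same-privilege frame: EIP, CS and EFLAGS (+ an optional error code), i.e.
   3 or 4 slots of 2 bytes each, doubled for a 32-bit gate. */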
5178 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5179 if (rcStrict != VINF_SUCCESS)
5180 return rcStrict;
5181 void * const pvStackFrame = uStackFrame.pv;
5182
5183 if (f32BitGate)
5184 {
5185 if (fFlags & IEM_XCPT_FLAGS_ERR)
5186 *uStackFrame.pu32++ = uErr;
5187 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5188 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5189 uStackFrame.pu32[2] = fEfl;
5190 }
5191 else
5192 {
5193 if (fFlags & IEM_XCPT_FLAGS_ERR)
5194 *uStackFrame.pu16++ = uErr;
5195 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5196 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5197 uStackFrame.pu16[2] = fEfl;
5198 }
5199 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5200 if (rcStrict != VINF_SUCCESS)
5201 return rcStrict;
5202
5203 /* Mark the CS selector as 'accessed'. */
5204 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5205 {
5206 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5207 if (rcStrict != VINF_SUCCESS)
5208 return rcStrict;
5209 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5210 }
5211
5212 /*
5213 * Start committing the register changes (joins with the other branch).
5214 */
5215 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5216 }
5217
5218 /* ... register committing continues. */
5219 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5220 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5221 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5222 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5223 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5224 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5225
5226 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5227 fEfl &= ~fEflToClear;
5228 IEMMISC_SET_EFL(pVCpu, fEfl);
5229
5230 if (fFlags & IEM_XCPT_FLAGS_CR2)
5231 pVCpu->cpum.GstCtx.cr2 = uCr2;
5232
5233 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5234 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5235
5236 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5237}
5238
5239
5240/**
5241 * Implements exceptions and interrupts for long mode.
5242 *
5243 * @returns VBox strict status code.
5244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5245 * @param cbInstr The number of bytes to offset rIP by in the return
5246 * address.
5247 * @param u8Vector The interrupt / exception vector number.
5248 * @param fFlags The flags.
5249 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5250 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5251 */
5252IEM_STATIC VBOXSTRICTRC
5253iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5254 uint8_t cbInstr,
5255 uint8_t u8Vector,
5256 uint32_t fFlags,
5257 uint16_t uErr,
5258 uint64_t uCr2)
5259{
5260 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5261
5262 /*
5263 * Read the IDT entry.
5264 */
5265 uint16_t offIdt = (uint16_t)u8Vector << 4;
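/* Long-mode IDT entries are 16 bytes each, hence the '<< 4'; the descriptor
   is fetched below in two 8-byte halves. */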
5266 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5267 {
5268 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5269 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5270 }
5271 X86DESC64 Idte;
5272 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5273 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5274 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5275 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5276 {
5277 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5278 return rcStrict;
5279 }
5280 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5281 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5282 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5283
5284 /*
5285 * Check the descriptor type, DPL and such.
5286 * ASSUMES this is done in the same order as described for call-gate calls.
5287 */
5288 if (Idte.Gate.u1DescType)
5289 {
5290 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5291 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5292 }
5293 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5294 switch (Idte.Gate.u4Type)
5295 {
5296 case AMD64_SEL_TYPE_SYS_INT_GATE:
5297 fEflToClear |= X86_EFL_IF;
5298 break;
5299 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5300 break;
5301
5302 default:
5303 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5304 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5305 }
5306
5307 /* Check DPL against CPL if applicable. */
5308 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5309 {
5310 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5311 {
5312 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5313 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5314 }
5315 }
5316
5317 /* Is it there? */
5318 if (!Idte.Gate.u1Present)
5319 {
5320 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5321 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5322 }
5323
5324 /* A null CS is bad. */
5325 RTSEL NewCS = Idte.Gate.u16Sel;
5326 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5327 {
5328 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5329 return iemRaiseGeneralProtectionFault0(pVCpu);
5330 }
5331
5332 /* Fetch the descriptor for the new CS. */
5333 IEMSELDESC DescCS;
5334 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5335 if (rcStrict != VINF_SUCCESS)
5336 {
5337 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5338 return rcStrict;
5339 }
5340
5341 /* Must be a 64-bit code segment. */
5342 if (!DescCS.Long.Gen.u1DescType)
5343 {
5344 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5345 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5346 }
5347 if ( !DescCS.Long.Gen.u1Long
5348 || DescCS.Long.Gen.u1DefBig
5349 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5350 {
5351 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5352 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5353 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5354 }
5355
5356 /* Don't allow lowering the privilege level. For non-conforming CS
5357 selectors, the CS.DPL sets the privilege level the trap/interrupt
5358 handler runs at. For conforming CS selectors, the CPL remains
5359 unchanged, but the CS.DPL must be <= CPL. */
5360 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5361 * when CPU in Ring-0. Result \#GP? */
5362 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5363 {
5364 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5365 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5366 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5367 }
5368
5369
5370 /* Make sure the selector is present. */
5371 if (!DescCS.Legacy.Gen.u1Present)
5372 {
5373 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5374 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5375 }
5376
5377 /* Check that the new RIP is canonical. */
5378 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5379 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5380 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5381 if (!IEM_IS_CANONICAL(uNewRip))
5382 {
5383 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5384 return iemRaiseGeneralProtectionFault0(pVCpu);
5385 }
5386
5387 /*
5388 * If the privilege level changes or if the IST isn't zero, we need to get
5389 * a new stack from the TSS.
5390 */
5391 uint64_t uNewRsp;
5392 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5393 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5394 if ( uNewCpl != pVCpu->iem.s.uCpl
5395 || Idte.Gate.u3IST != 0)
5396 {
5397 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5398 if (rcStrict != VINF_SUCCESS)
5399 return rcStrict;
5400 }
5401 else
5402 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5403 uNewRsp &= ~(uint64_t)0xf;
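/* In long mode the CPU aligns the new stack pointer on a 16-byte boundary
   before pushing the interrupt stack frame. */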
5404
5405 /*
5406 * Calc the flag image to push.
5407 */
5408 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5409 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5410 fEfl &= ~X86_EFL_RF;
5411 else
5412 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5413
5414 /*
5415 * Start making changes.
5416 */
5417 /* Set the new CPL so that stack accesses use it. */
5418 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5419 pVCpu->iem.s.uCpl = uNewCpl;
5420
5421 /* Create the stack frame. */
5422 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
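/* The 64-bit frame always contains SS, RSP, RFLAGS, CS and RIP (5 qwords),
   plus one more qword when an error code is pushed. */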
5423 RTPTRUNION uStackFrame;
5424 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5425 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5426 if (rcStrict != VINF_SUCCESS)
5427 return rcStrict;
5428 void * const pvStackFrame = uStackFrame.pv;
5429
5430 if (fFlags & IEM_XCPT_FLAGS_ERR)
5431 *uStackFrame.pu64++ = uErr;
5432 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5433 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5434 uStackFrame.pu64[2] = fEfl;
5435 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5436 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5437 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5438 if (rcStrict != VINF_SUCCESS)
5439 return rcStrict;
5440
5441 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5442 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5443 * after pushing the stack frame? (Write protect the gdt + stack to
5444 * find out.) */
5445 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5446 {
5447 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5448 if (rcStrict != VINF_SUCCESS)
5449 return rcStrict;
5450 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5451 }
5452
5453 /*
5454 * Start committing the register changes.
5455 */
5456 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5457 * hidden registers when interrupting 32-bit or 16-bit code! */
5458 if (uNewCpl != uOldCpl)
5459 {
5460 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5461 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5462 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5463 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5464 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5465 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5466 }
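/* Note: on a long-mode privilege change SS is loaded with a null selector
   carrying the new CPL as RPL; the hidden parts above only mark it unusable
   with DPL = new CPL. */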
5467 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5468 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5469 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5470 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5471 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5472 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5473 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5474 pVCpu->cpum.GstCtx.rip = uNewRip;
5475
5476 fEfl &= ~fEflToClear;
5477 IEMMISC_SET_EFL(pVCpu, fEfl);
5478
5479 if (fFlags & IEM_XCPT_FLAGS_CR2)
5480 pVCpu->cpum.GstCtx.cr2 = uCr2;
5481
5482 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5483 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5484
5485 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5486}
5487
5488
5489/**
5490 * Implements exceptions and interrupts.
5491 *
5492 * All exceptions and interrupts go through this function!
5493 *
5494 * @returns VBox strict status code.
5495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5496 * @param cbInstr The number of bytes to offset rIP by in the return
5497 * address.
5498 * @param u8Vector The interrupt / exception vector number.
5499 * @param fFlags The flags.
5500 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5501 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5502 */
5503DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5504iemRaiseXcptOrInt(PVMCPU pVCpu,
5505 uint8_t cbInstr,
5506 uint8_t u8Vector,
5507 uint32_t fFlags,
5508 uint16_t uErr,
5509 uint64_t uCr2)
5510{
5511 /*
5512 * Get all the state that we might need here.
5513 */
5514 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5515 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5516
5517#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5518 /*
5519 * Flush prefetch buffer
5520 */
5521 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5522#endif
5523
5524 /*
5525 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5526 */
5527 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5528 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5529 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5530 | IEM_XCPT_FLAGS_BP_INSTR
5531 | IEM_XCPT_FLAGS_ICEBP_INSTR
5532 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5533 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5534 {
5535 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5536 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5537 u8Vector = X86_XCPT_GP;
5538 uErr = 0;
5539 }
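/* I.e. INT n executed in V8086 mode with IOPL < 3 turns into #GP(0) before any
   IDT lookup; INT3, INTO and ICEBP are not subject to this IOPL check. */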
5540#ifdef DBGFTRACE_ENABLED
5541 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5542 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5543 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5544#endif
5545
5546 /*
5547 * Evaluate whether NMI blocking should be in effect.
5548 * Normally, NMI blocking is in effect whenever we inject an NMI.
5549 */
5550 bool fBlockNmi;
5551 if ( u8Vector == X86_XCPT_NMI
5552 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5553 fBlockNmi = true;
5554 else
5555 fBlockNmi = false;
5556
5557#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5558 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5559 {
5560 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5561 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5562 return rcStrict0;
5563
5564 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5565 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5566 {
5567 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5568 fBlockNmi = false;
5569 }
5570 }
5571#endif
5572
5573#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5574 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5575 {
5576 /*
5577 * If the event is being injected as part of VMRUN, it isn't subject to event
5578 * intercepts in the nested-guest. However, secondary exceptions that occur
5579 * during injection of any event -are- subject to exception intercepts.
5580 *
5581 * See AMD spec. 15.20 "Event Injection".
5582 */
5583 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5584 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5585 else
5586 {
5587 /*
5588 * Check and handle if the event being raised is intercepted.
5589 */
5590 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5591 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5592 return rcStrict0;
5593 }
5594 }
5595#endif
5596
5597 /*
5598 * Set NMI blocking if necessary.
5599 */
5600 if ( fBlockNmi
5601 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5602 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5603
5604 /*
5605 * Do recursion accounting.
5606 */
5607 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5608 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5609 if (pVCpu->iem.s.cXcptRecursions == 0)
5610 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5611 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5612 else
5613 {
5614 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5615 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5616 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5617
5618 if (pVCpu->iem.s.cXcptRecursions >= 4)
5619 {
5620#ifdef DEBUG_bird
5621 AssertFailed();
5622#endif
5623 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5624 }
5625
5626 /*
5627 * Evaluate the sequence of recurring events.
5628 */
5629 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5630 NULL /* pXcptRaiseInfo */);
5631 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5632 { /* likely */ }
5633 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5634 {
5635 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5636 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5637 u8Vector = X86_XCPT_DF;
5638 uErr = 0;
5639#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5640 /* VMX nested-guest #DF intercept needs to be checked here. */
5641 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5642 {
5643 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5644 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5645 return rcStrict0;
5646 }
5647#endif
5648 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5649 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5650 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5651 }
5652 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5653 {
5654 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5655 return iemInitiateCpuShutdown(pVCpu);
5656 }
5657 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5658 {
5659 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5660 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5661 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5662 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5663 return VERR_EM_GUEST_CPU_HANG;
5664 }
5665 else
5666 {
5667 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5668 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5669 return VERR_IEM_IPE_9;
5670 }
5671
5672 /*
5673 * The 'EXT' bit is set when an exception occurs during delivery of an external
5674 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5675 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5676 * software interrupt instructions (INT n, INTO, INT3), the 'EXT' bit is not set[3].
5677 *
5678 * [1] - Intel spec. 6.13 "Error Code"
5679 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5680 * [3] - Intel Instruction reference for INT n.
5681 */
5682 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5683 && (fFlags & IEM_XCPT_FLAGS_ERR)
5684 && u8Vector != X86_XCPT_PF
5685 && u8Vector != X86_XCPT_DF)
5686 {
5687 uErr |= X86_TRAP_ERR_EXTERNAL;
5688 }
5689 }
5690
5691 pVCpu->iem.s.cXcptRecursions++;
5692 pVCpu->iem.s.uCurXcpt = u8Vector;
5693 pVCpu->iem.s.fCurXcpt = fFlags;
5694 pVCpu->iem.s.uCurXcptErr = uErr;
5695 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5696
5697 /*
5698 * Extensive logging.
5699 */
5700#if defined(LOG_ENABLED) && defined(IN_RING3)
5701 if (LogIs3Enabled())
5702 {
5703 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5704 PVM pVM = pVCpu->CTX_SUFF(pVM);
5705 char szRegs[4096];
5706 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5707 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5708 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5709 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5710 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5711 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5712 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5713 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5714 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5715 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5716 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5717 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5718 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5719 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5720 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5721 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5722 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5723 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5724 " efer=%016VR{efer}\n"
5725 " pat=%016VR{pat}\n"
5726 " sf_mask=%016VR{sf_mask}\n"
5727 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5728 " lstar=%016VR{lstar}\n"
5729 " star=%016VR{star} cstar=%016VR{cstar}\n"
5730 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5731 );
5732
5733 char szInstr[256];
5734 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5735 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5736 szInstr, sizeof(szInstr), NULL);
5737 Log3(("%s%s\n", szRegs, szInstr));
5738 }
5739#endif /* LOG_ENABLED */
5740
5741 /*
5742 * Call the mode specific worker function.
5743 */
5744 VBOXSTRICTRC rcStrict;
5745 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5746 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5747 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5748 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5749 else
5750 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5751
5752 /* Flush the prefetch buffer. */
5753#ifdef IEM_WITH_CODE_TLB
5754 pVCpu->iem.s.pbInstrBuf = NULL;
5755#else
5756 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5757#endif
5758
5759 /*
5760 * Unwind.
5761 */
5762 pVCpu->iem.s.cXcptRecursions--;
5763 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5764 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5765 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5766 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5767 pVCpu->iem.s.cXcptRecursions + 1));
5768 return rcStrict;
5769}
5770
5771#ifdef IEM_WITH_SETJMP
5772/**
5773 * See iemRaiseXcptOrInt. Will not return.
5774 */
5775IEM_STATIC DECL_NO_RETURN(void)
5776iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5777 uint8_t cbInstr,
5778 uint8_t u8Vector,
5779 uint32_t fFlags,
5780 uint16_t uErr,
5781 uint64_t uCr2)
5782{
5783 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5784 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5785}
5786#endif
5787
5788
5789/** \#DE - 00. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5791{
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5793}
5794
5795
5796/** \#DB - 01.
5797 * @note This automatically clears DR7.GD. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5799{
5800 /** @todo set/clear RF. */
5801 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5802 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5803}
5804
5805
5806/** \#BR - 05. */
5807DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5808{
5809 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5810}
5811
5812
5813/** \#UD - 06. */
5814DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5815{
5816 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5817}
5818
5819
5820/** \#NM - 07. */
5821DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5822{
5823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5824}
5825
5826
5827/** \#TS(err) - 0a. */
5828DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5829{
5830 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5831}
5832
5833
5834/** \#TS(tr) - 0a. */
5835DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5836{
5837 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5838 pVCpu->cpum.GstCtx.tr.Sel, 0);
5839}
5840
5841
5842/** \#TS(0) - 0a. */
5843DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5844{
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5846 0, 0);
5847}
5848
5849
5850 /** \#TS(sel) - 0a. */
5851DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5852{
5853 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5854 uSel & X86_SEL_MASK_OFF_RPL, 0);
5855}
5856
5857
5858/** \#NP(err) - 0b. */
5859DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5860{
5861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5862}
5863
5864
5865/** \#NP(sel) - 0b. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5867{
5868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5869 uSel & ~X86_SEL_RPL, 0);
5870}
5871
5872
5873/** \#SS(seg) - 0c. */
5874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5875{
5876 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5877 uSel & ~X86_SEL_RPL, 0);
5878}
5879
5880
5881/** \#SS(err) - 0c. */
5882DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5883{
5884 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5885}
5886
5887
5888/** \#GP(n) - 0d. */
5889DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5890{
5891 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5892}
5893
5894
5895/** \#GP(0) - 0d. */
5896DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5897{
5898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5899}
5900
5901#ifdef IEM_WITH_SETJMP
5902/** \#GP(0) - 0d. */
5903DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5904{
5905 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5906}
5907#endif
5908
5909
5910/** \#GP(sel) - 0d. */
5911DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5912{
5913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5914 Sel & ~X86_SEL_RPL, 0);
5915}
5916
5917
5918/** \#GP(0) - 0d. */
5919DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5920{
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5922}
5923
5924
5925/** \#GP(sel) - 0d. */
5926DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5927{
5928 NOREF(iSegReg); NOREF(fAccess);
5929 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5930 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5931}
5932
5933#ifdef IEM_WITH_SETJMP
5934/** \#GP(sel) - 0d, longjmp. */
5935DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5936{
5937 NOREF(iSegReg); NOREF(fAccess);
5938 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5939 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5940}
5941#endif
5942
5943/** \#GP(sel) - 0d. */
5944DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5945{
5946 NOREF(Sel);
5947 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5948}
5949
5950#ifdef IEM_WITH_SETJMP
5951/** \#GP(sel) - 0d, longjmp. */
5952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5953{
5954 NOREF(Sel);
5955 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5956}
5957#endif
5958
5959
5960/** \#GP(sel) - 0d. */
5961DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5962{
5963 NOREF(iSegReg); NOREF(fAccess);
5964 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5965}
5966
5967#ifdef IEM_WITH_SETJMP
5968/** \#GP(sel) - 0d, longjmp. */
5969DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5970 uint32_t fAccess)
5971{
5972 NOREF(iSegReg); NOREF(fAccess);
5973 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5974}
5975#endif
5976
5977
5978/** \#PF(n) - 0e. */
5979DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5980{
5981 uint16_t uErr;
5982 switch (rc)
5983 {
5984 case VERR_PAGE_NOT_PRESENT:
5985 case VERR_PAGE_TABLE_NOT_PRESENT:
5986 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5987 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5988 uErr = 0;
5989 break;
5990
5991 default:
5992 AssertMsgFailed(("%Rrc\n", rc));
5993 RT_FALL_THRU();
5994 case VERR_ACCESS_DENIED:
5995 uErr = X86_TRAP_PF_P;
5996 break;
5997
5998 /** @todo reserved */
5999 }
6000
6001 if (pVCpu->iem.s.uCpl == 3)
6002 uErr |= X86_TRAP_PF_US;
6003
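    /* The instruction-fetch (I/D) bit is only reported when NX paging is in effect, i.e. both CR4.PAE and EFER.NXE are set. */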
6004 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6005 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6006 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6007 uErr |= X86_TRAP_PF_ID;
6008
6009#if 0 /* This is so much non-sense, really. Why was it done like that? */
6010 /* Note! RW access callers reporting a WRITE protection fault, will clear
6011 the READ flag before calling. So, read-modify-write accesses (RW)
6012 can safely be reported as READ faults. */
6013 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6014 uErr |= X86_TRAP_PF_RW;
6015#else
6016 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6017 {
6018 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6019 uErr |= X86_TRAP_PF_RW;
6020 }
6021#endif
6022
6023 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6024 uErr, GCPtrWhere);
6025}
6026
6027#ifdef IEM_WITH_SETJMP
6028/** \#PF(n) - 0e, longjmp. */
6029IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6030{
6031 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6032}
6033#endif
6034
6035
6036/** \#MF(0) - 10. */
6037DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6038{
6039 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6040}
6041
6042
6043/** \#AC(0) - 11. */
6044DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6045{
6046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6047}
6048
6049
6050/**
6051 * Macro for calling iemCImplRaiseDivideError().
6052 *
6053 * This enables us to add/remove arguments and force different levels of
6054 * inlining as we wish.
6055 *
6056 * @return Strict VBox status code.
6057 */
6058#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6059IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6060{
6061 NOREF(cbInstr);
6062 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6063}
6064
6065
6066/**
6067 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6068 *
6069 * This enables us to add/remove arguments and force different levels of
6070 * inlining as we wish.
6071 *
6072 * @return Strict VBox status code.
6073 */
6074#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6075IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6076{
6077 NOREF(cbInstr);
6078 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6079}
6080
6081
6082/**
6083 * Macro for calling iemCImplRaiseInvalidOpcode().
6084 *
6085 * This enables us to add/remove arguments and force different levels of
6086 * inlining as we wish.
6087 *
6088 * @return Strict VBox status code.
6089 */
6090#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6091IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6092{
6093 NOREF(cbInstr);
6094 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6095}
6096
6097
6098/** @} */
6099
6100
6101/*
6102 *
6103 * Helper routines.
6104 * Helper routines.
6105 * Helper routines.
6106 *
6107 */
6108
6109/**
6110 * Recalculates the effective operand size.
6111 *
6112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6113 */
6114IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6115{
6116 switch (pVCpu->iem.s.enmCpuMode)
6117 {
6118 case IEMMODE_16BIT:
6119 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6120 break;
6121 case IEMMODE_32BIT:
6122 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6123 break;
6124 case IEMMODE_64BIT:
6125 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6126 {
6127 case 0:
6128 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6129 break;
6130 case IEM_OP_PRF_SIZE_OP:
6131 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6132 break;
6133 case IEM_OP_PRF_SIZE_REX_W:
6134 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
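                /* REX.W takes precedence over the 0x66 operand-size prefix. */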
6135 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6136 break;
6137 }
6138 break;
6139 default:
6140 AssertFailed();
6141 }
6142}
6143
6144
6145/**
6146 * Sets the default operand size to 64-bit and recalculates the effective
6147 * operand size.
6148 *
6149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6150 */
6151IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6152{
6153 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6154 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
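    /* Only a lone 0x66 operand-size prefix (without REX.W) drops the effective operand size to 16-bit when the default is 64-bit. */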
6155 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6156 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6157 else
6158 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6159}
6160
6161
6162/*
6163 *
6164 * Common opcode decoders.
6165 * Common opcode decoders.
6166 * Common opcode decoders.
6167 *
6168 */
6169//#include <iprt/mem.h>
6170
6171/**
6172 * Used to add extra details about a stub case.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 */
6175IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6176{
6177#if defined(LOG_ENABLED) && defined(IN_RING3)
6178 PVM pVM = pVCpu->CTX_SUFF(pVM);
6179 char szRegs[4096];
6180 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6181 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6182 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6183 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6184 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6185 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6186 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6187 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6188 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6189 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6190 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6191 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6192 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6193 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6194 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6195 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6196 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6197 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6198 " efer=%016VR{efer}\n"
6199 " pat=%016VR{pat}\n"
6200 " sf_mask=%016VR{sf_mask}\n"
6201 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6202 " lstar=%016VR{lstar}\n"
6203 " star=%016VR{star} cstar=%016VR{cstar}\n"
6204 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6205 );
6206
6207 char szInstr[256];
6208 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6209 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6210 szInstr, sizeof(szInstr), NULL);
6211
6212 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6213#else
6214 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6215#endif
6216}
6217
6218/**
6219 * Complains about a stub.
6220 *
6221 * Providing two versions of this macro, one for daily use and one for use when
6222 * working on IEM.
6223 */
6224#if 0
6225# define IEMOP_BITCH_ABOUT_STUB() \
6226 do { \
6227 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6228 iemOpStubMsg2(pVCpu); \
6229 RTAssertPanic(); \
6230 } while (0)
6231#else
6232# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6233#endif
6234
6235/** Stubs an opcode. */
6236#define FNIEMOP_STUB(a_Name) \
6237 FNIEMOP_DEF(a_Name) \
6238 { \
6239 RT_NOREF_PV(pVCpu); \
6240 IEMOP_BITCH_ABOUT_STUB(); \
6241 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6242 } \
6243 typedef int ignore_semicolon
6244
6245/** Stubs an opcode. */
6246#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6247 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6248 { \
6249 RT_NOREF_PV(pVCpu); \
6250 RT_NOREF_PV(a_Name0); \
6251 IEMOP_BITCH_ABOUT_STUB(); \
6252 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6253 } \
6254 typedef int ignore_semicolon
6255
6256/** Stubs an opcode which currently should raise \#UD. */
6257#define FNIEMOP_UD_STUB(a_Name) \
6258 FNIEMOP_DEF(a_Name) \
6259 { \
6260 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6261 return IEMOP_RAISE_INVALID_OPCODE(); \
6262 } \
6263 typedef int ignore_semicolon
6264
6265/** Stubs an opcode which currently should raise \#UD. */
6266#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6267 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6268 { \
6269 RT_NOREF_PV(pVCpu); \
6270 RT_NOREF_PV(a_Name0); \
6271 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6272 return IEMOP_RAISE_INVALID_OPCODE(); \
6273 } \
6274 typedef int ignore_semicolon
6275
6276
6277
6278/** @name Register Access.
6279 * @{
6280 */
6281
6282/**
6283 * Gets a reference (pointer) to the specified hidden segment register.
6284 *
6285 * @returns Hidden register reference.
6286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6287 * @param iSegReg The segment register.
6288 */
6289IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6290{
6291 Assert(iSegReg < X86_SREG_COUNT);
6292 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6293 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6294
6295#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6296 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6297 { /* likely */ }
6298 else
6299 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6300#else
6301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6302#endif
6303 return pSReg;
6304}
6305
6306
6307/**
6308 * Ensures that the given hidden segment register is up to date.
6309 *
6310 * @returns Hidden register reference.
6311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6312 * @param pSReg The segment register.
6313 */
6314IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6315{
6316#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6317 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6318 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6319#else
6320 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6321 NOREF(pVCpu);
6322#endif
6323 return pSReg;
6324}
6325
6326
6327/**
6328 * Gets a reference (pointer) to the specified segment register (the selector
6329 * value).
6330 *
6331 * @returns Pointer to the selector variable.
6332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6333 * @param iSegReg The segment register.
6334 */
6335DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6336{
6337 Assert(iSegReg < X86_SREG_COUNT);
6338 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6339 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6340}
6341
6342
6343/**
6344 * Fetches the selector value of a segment register.
6345 *
6346 * @returns The selector value.
6347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6348 * @param iSegReg The segment register.
6349 */
6350DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6351{
6352 Assert(iSegReg < X86_SREG_COUNT);
6353 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6354 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6355}
6356
6357
6358/**
6359 * Fetches the base address value of a segment register.
6360 *
6361 * @returns The base address value.
6362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6363 * @param iSegReg The segment register.
6364 */
6365DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6366{
6367 Assert(iSegReg < X86_SREG_COUNT);
6368 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6369 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6370}
6371
6372
6373/**
6374 * Gets a reference (pointer) to the specified general purpose register.
6375 *
6376 * @returns Register reference.
6377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6378 * @param iReg The general purpose register.
6379 */
6380DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6381{
6382 Assert(iReg < 16);
6383 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6384}
6385
6386
6387/**
6388 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6389 *
6390 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6391 *
6392 * @returns Register reference.
6393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6394 * @param iReg The register.
6395 */
6396DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6397{
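    /* Indexes 0-3 are AL/CL/DL/BL.  With any REX prefix, indexes 4-15 select SPL/BPL/SIL/DIL and R8B-R15B; without REX, 4-7 select the high bytes AH/CH/DH/BH. */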
6398 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6399 {
6400 Assert(iReg < 16);
6401 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6402 }
6403 /* high 8-bit register. */
6404 Assert(iReg < 8);
6405 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6406}
6407
6408
6409/**
6410 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6411 *
6412 * @returns Register reference.
6413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6414 * @param iReg The register.
6415 */
6416DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6417{
6418 Assert(iReg < 16);
6419 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6420}
6421
6422
6423/**
6424 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6425 *
6426 * @returns Register reference.
6427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6428 * @param iReg The register.
6429 */
6430DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6431{
6432 Assert(iReg < 16);
6433 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6434}
6435
6436
6437/**
6438 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6439 *
6440 * @returns Register reference.
6441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6442 * @param iReg The register.
6443 */
6444DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6445{
6446 Assert(iReg < 16);
6447 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6448}
6449
6450
6451/**
6452 * Gets a reference (pointer) to the specified segment register's base address.
6453 *
6454 * @returns Segment register base address reference.
6455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6456 * @param iSegReg The segment register.
6457 */
6458DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6459{
6460 Assert(iSegReg < X86_SREG_COUNT);
6461 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6462 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6463}
6464
6465
6466/**
6467 * Fetches the value of an 8-bit general purpose register.
6468 *
6469 * @returns The register value.
6470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6471 * @param iReg The register.
6472 */
6473DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6474{
6475 return *iemGRegRefU8(pVCpu, iReg);
6476}
6477
6478
6479/**
6480 * Fetches the value of a 16-bit general purpose register.
6481 *
6482 * @returns The register value.
6483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6484 * @param iReg The register.
6485 */
6486DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6487{
6488 Assert(iReg < 16);
6489 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6490}
6491
6492
6493/**
6494 * Fetches the value of a 32-bit general purpose register.
6495 *
6496 * @returns The register value.
6497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6498 * @param iReg The register.
6499 */
6500DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6501{
6502 Assert(iReg < 16);
6503 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6504}
6505
6506
6507/**
6508 * Fetches the value of a 64-bit general purpose register.
6509 *
6510 * @returns The register value.
6511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6512 * @param iReg The register.
6513 */
6514DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6515{
6516 Assert(iReg < 16);
6517 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6518}
6519
6520
6521/**
6522 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6523 *
6524 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6525 * segment limit.
6526 *
6527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6528 * @param offNextInstr The offset of the next instruction.
6529 */
6530IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6531{
6532 switch (pVCpu->iem.s.enmEffOpSize)
6533 {
6534 case IEMMODE_16BIT:
6535 {
6536 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6537 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6538 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6539 return iemRaiseGeneralProtectionFault0(pVCpu);
6540 pVCpu->cpum.GstCtx.rip = uNewIp;
6541 break;
6542 }
6543
6544 case IEMMODE_32BIT:
6545 {
6546 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6547 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6548
6549 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6550 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6551 return iemRaiseGeneralProtectionFault0(pVCpu);
6552 pVCpu->cpum.GstCtx.rip = uNewEip;
6553 break;
6554 }
6555
6556 case IEMMODE_64BIT:
6557 {
6558 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6559
6560 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6561 if (!IEM_IS_CANONICAL(uNewRip))
6562 return iemRaiseGeneralProtectionFault0(pVCpu);
6563 pVCpu->cpum.GstCtx.rip = uNewRip;
6564 break;
6565 }
6566
6567 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6568 }
6569
6570 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6571
6572#ifndef IEM_WITH_CODE_TLB
6573 /* Flush the prefetch buffer. */
6574 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6575#endif
6576
6577 return VINF_SUCCESS;
6578}
6579
6580
6581/**
6582 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6583 *
6584 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6585 * segment limit.
6586 *
6587 * @returns Strict VBox status code.
6588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6589 * @param offNextInstr The offset of the next instruction.
6590 */
6591IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6592{
6593 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6594
6595 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6596 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6597 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6598 return iemRaiseGeneralProtectionFault0(pVCpu);
6599 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6600 pVCpu->cpum.GstCtx.rip = uNewIp;
6601 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6602
6603#ifndef IEM_WITH_CODE_TLB
6604 /* Flush the prefetch buffer. */
6605 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6606#endif
6607
6608 return VINF_SUCCESS;
6609}
6610
6611
6612/**
6613 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6614 *
6615 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6616 * segment limit.
6617 *
6618 * @returns Strict VBox status code.
6619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6620 * @param offNextInstr The offset of the next instruction.
6621 */
6622IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6623{
6624 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6625
6626 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6627 {
6628 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6629
6630 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6631 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6632 return iemRaiseGeneralProtectionFault0(pVCpu);
6633 pVCpu->cpum.GstCtx.rip = uNewEip;
6634 }
6635 else
6636 {
6637 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6638
6639 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6640 if (!IEM_IS_CANONICAL(uNewRip))
6641 return iemRaiseGeneralProtectionFault0(pVCpu);
6642 pVCpu->cpum.GstCtx.rip = uNewRip;
6643 }
6644 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6645
6646#ifndef IEM_WITH_CODE_TLB
6647 /* Flush the prefetch buffer. */
6648 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6649#endif
6650
6651 return VINF_SUCCESS;
6652}
6653
6654
6655/**
6656 * Performs a near jump to the specified address.
6657 *
6658 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6659 * segment limit.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param uNewRip The new RIP value.
6663 */
6664IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6665{
6666 switch (pVCpu->iem.s.enmEffOpSize)
6667 {
6668 case IEMMODE_16BIT:
6669 {
6670 Assert(uNewRip <= UINT16_MAX);
6671 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6672 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6673 return iemRaiseGeneralProtectionFault0(pVCpu);
6674 /** @todo Test 16-bit jump in 64-bit mode. */
6675 pVCpu->cpum.GstCtx.rip = uNewRip;
6676 break;
6677 }
6678
6679 case IEMMODE_32BIT:
6680 {
6681 Assert(uNewRip <= UINT32_MAX);
6682 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6683 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6684
6685 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6686 return iemRaiseGeneralProtectionFault0(pVCpu);
6687 pVCpu->cpum.GstCtx.rip = uNewRip;
6688 break;
6689 }
6690
6691 case IEMMODE_64BIT:
6692 {
6693 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6694
6695 if (!IEM_IS_CANONICAL(uNewRip))
6696 return iemRaiseGeneralProtectionFault0(pVCpu);
6697 pVCpu->cpum.GstCtx.rip = uNewRip;
6698 break;
6699 }
6700
6701 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6702 }
6703
6704 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6705
6706#ifndef IEM_WITH_CODE_TLB
6707 /* Flush the prefetch buffer. */
6708 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6709#endif
6710
6711 return VINF_SUCCESS;
6712}
6713
6714
6715/**
6716 * Get the address of the top of the stack.
6717 *
6718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6719 */
6720DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6721{
6722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6723 return pVCpu->cpum.GstCtx.rsp;
6724 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6725 return pVCpu->cpum.GstCtx.esp;
6726 return pVCpu->cpum.GstCtx.sp;
6727}
6728
6729
6730/**
6731 * Updates the RIP/EIP/IP to point to the next instruction.
6732 *
6733 * This function leaves the EFLAGS.RF flag alone.
6734 *
6735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6736 * @param cbInstr The number of bytes to add.
6737 */
6738IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6739{
6740 switch (pVCpu->iem.s.enmCpuMode)
6741 {
6742 case IEMMODE_16BIT:
6743 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6744 pVCpu->cpum.GstCtx.eip += cbInstr;
6745 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6746 break;
6747
6748 case IEMMODE_32BIT:
6749 pVCpu->cpum.GstCtx.eip += cbInstr;
6750 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6751 break;
6752
6753 case IEMMODE_64BIT:
6754 pVCpu->cpum.GstCtx.rip += cbInstr;
6755 break;
6756 default: AssertFailed();
6757 }
6758}
6759
6760
6761#if 0
6762/**
6763 * Updates the RIP/EIP/IP to point to the next instruction.
6764 *
6765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6766 */
6767IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6768{
6769 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6770}
6771#endif
6772
6773
6774
6775/**
6776 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6777 *
6778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6779 * @param cbInstr The number of bytes to add.
6780 */
6781IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6782{
6783 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6784
6785 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6786#if ARCH_BITS >= 64
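    /* RIP masks indexed by IEMMODE_*: 16-bit and 32-bit modes keep only the low 32 bits, 64-bit mode keeps the full value. */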
6787 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6788 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6789 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6790#else
6791 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6792 pVCpu->cpum.GstCtx.rip += cbInstr;
6793 else
6794 pVCpu->cpum.GstCtx.eip += cbInstr;
6795#endif
6796}
6797
6798
6799/**
6800 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6801 *
6802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6803 */
6804IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6805{
6806 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6807}
6808
6809
6810/**
6811 * Adds to the stack pointer.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param cbToAdd The number of bytes to add (8-bit!).
6815 */
6816DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6817{
6818 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6819 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6820 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6821 pVCpu->cpum.GstCtx.esp += cbToAdd;
6822 else
6823 pVCpu->cpum.GstCtx.sp += cbToAdd;
6824}
6825
6826
6827/**
6828 * Subtracts from the stack pointer.
6829 *
6830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6831 * @param cbToSub The number of bytes to subtract (8-bit!).
6832 */
6833DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6834{
6835 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6836 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6837 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6838 pVCpu->cpum.GstCtx.esp -= cbToSub;
6839 else
6840 pVCpu->cpum.GstCtx.sp -= cbToSub;
6841}
6842
6843
6844/**
6845 * Adds to the temporary stack pointer.
6846 *
6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6848 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6849 * @param cbToAdd The number of bytes to add (16-bit).
6850 */
6851DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6852{
6853 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6854 pTmpRsp->u += cbToAdd;
6855 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6856 pTmpRsp->DWords.dw0 += cbToAdd;
6857 else
6858 pTmpRsp->Words.w0 += cbToAdd;
6859}
6860
6861
6862/**
6863 * Subtracts from the temporary stack pointer.
6864 *
6865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6866 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6867 * @param cbToSub The number of bytes to subtract.
6868 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6869 * expecting that.
6870 */
6871DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6872{
6873 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6874 pTmpRsp->u -= cbToSub;
6875 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6876 pTmpRsp->DWords.dw0 -= cbToSub;
6877 else
6878 pTmpRsp->Words.w0 -= cbToSub;
6879}
6880
6881
6882/**
6883 * Calculates the effective stack address for a push of the specified size as
6884 * well as the new RSP value (upper bits may be masked).
6885 *
6886 * @returns The effective stack address for the push.
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 * @param cbItem The size of the stack item to push.
6889 * @param puNewRsp Where to return the new RSP value.
6890 */
6891DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6892{
6893 RTUINT64U uTmpRsp;
6894 RTGCPTR GCPtrTop;
6895 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6896
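    /* The stack grows downwards, so the decremented stack pointer is also the effective address of the item being pushed. */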
6897 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6898 GCPtrTop = uTmpRsp.u -= cbItem;
6899 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6900 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6901 else
6902 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6903 *puNewRsp = uTmpRsp.u;
6904 return GCPtrTop;
6905}
6906
6907
6908/**
6909 * Gets the current stack pointer and calculates the value after a pop of the
6910 * specified size.
6911 *
6912 * @returns Current stack pointer.
6913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6914 * @param cbItem The size of the stack item to pop.
6915 * @param puNewRsp Where to return the new RSP value.
6916 */
6917DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6918{
6919 RTUINT64U uTmpRsp;
6920 RTGCPTR GCPtrTop;
6921 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6922
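    /* For a pop the current stack pointer is the effective address; the item size is then added to produce the new RSP. */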
6923 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6924 {
6925 GCPtrTop = uTmpRsp.u;
6926 uTmpRsp.u += cbItem;
6927 }
6928 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6929 {
6930 GCPtrTop = uTmpRsp.DWords.dw0;
6931 uTmpRsp.DWords.dw0 += cbItem;
6932 }
6933 else
6934 {
6935 GCPtrTop = uTmpRsp.Words.w0;
6936 uTmpRsp.Words.w0 += cbItem;
6937 }
6938 *puNewRsp = uTmpRsp.u;
6939 return GCPtrTop;
6940}
6941
6942
6943/**
6944 * Calculates the effective stack address for a push of the specified size as
6945 * well as the new temporary RSP value (upper bits may be masked).
6946 *
6947 * @returns The effective stack address for the push.
6948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6949 * @param pTmpRsp The temporary stack pointer. This is updated.
6950 * @param cbItem The size of the stack item to push.
6951 */
6952DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6953{
6954 RTGCPTR GCPtrTop;
6955
6956 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6957 GCPtrTop = pTmpRsp->u -= cbItem;
6958 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6959 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6960 else
6961 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6962 return GCPtrTop;
6963}
6964
6965
6966/**
6967 * Gets the effective stack address for a pop of the specified size and
6968 * calculates and updates the temporary RSP.
6969 *
6970 * @returns Current stack pointer.
6971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6972 * @param pTmpRsp The temporary stack pointer. This is updated.
6973 * @param cbItem The size of the stack item to pop.
6974 */
6975DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6976{
6977 RTGCPTR GCPtrTop;
6978 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6979 {
6980 GCPtrTop = pTmpRsp->u;
6981 pTmpRsp->u += cbItem;
6982 }
6983 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6984 {
6985 GCPtrTop = pTmpRsp->DWords.dw0;
6986 pTmpRsp->DWords.dw0 += cbItem;
6987 }
6988 else
6989 {
6990 GCPtrTop = pTmpRsp->Words.w0;
6991 pTmpRsp->Words.w0 += cbItem;
6992 }
6993 return GCPtrTop;
6994}
6995
6996/** @} */
6997
6998
6999/** @name FPU access and helpers.
7000 *
7001 * @{
7002 */
7003
7004
7005/**
7006 * Hook for preparing to use the host FPU.
7007 *
7008 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7009 *
7010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7011 */
7012DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7013{
7014#ifdef IN_RING3
7015 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7016#else
7017 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7018#endif
7019 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7020}
7021
7022
7023/**
7024 * Hook for preparing to use the host FPU for SSE.
7025 *
7026 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7027 *
7028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7029 */
7030DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7031{
7032 iemFpuPrepareUsage(pVCpu);
7033}
7034
7035
7036/**
7037 * Hook for preparing to use the host FPU for AVX.
7038 *
7039 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7040 *
7041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7042 */
7043DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7044{
7045 iemFpuPrepareUsage(pVCpu);
7046}
7047
7048
7049/**
7050 * Hook for actualizing the guest FPU state before the interpreter reads it.
7051 *
7052 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7053 *
7054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7055 */
7056DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7057{
7058#ifdef IN_RING3
7059 NOREF(pVCpu);
7060#else
7061 CPUMRZFpuStateActualizeForRead(pVCpu);
7062#endif
7063 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7064}
7065
7066
7067/**
7068 * Hook for actualizing the guest FPU state before the interpreter changes it.
7069 *
7070 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7071 *
7072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7073 */
7074DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7075{
7076#ifdef IN_RING3
7077 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7078#else
7079 CPUMRZFpuStateActualizeForChange(pVCpu);
7080#endif
7081 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7082}
7083
7084
7085/**
7086 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7087 * only.
7088 *
7089 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7090 *
7091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7092 */
7093DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7094{
7095#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7096 NOREF(pVCpu);
7097#else
7098 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7099#endif
7100 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7101}
7102
7103
7104/**
7105 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7106 * read+write.
7107 *
7108 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7109 *
7110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7111 */
7112DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7113{
7114#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7115 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7116#else
7117 CPUMRZFpuStateActualizeForChange(pVCpu);
7118#endif
7119 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7120}
7121
7122
7123/**
7124 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7125 * only.
7126 *
7127 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7128 *
7129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7130 */
7131DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7132{
7133#ifdef IN_RING3
7134 NOREF(pVCpu);
7135#else
7136 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7137#endif
7138 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7139}
7140
7141
7142/**
7143 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7144 * read+write.
7145 *
7146 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7147 *
7148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7149 */
7150DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7151{
7152#ifdef IN_RING3
7153 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7154#else
7155 CPUMRZFpuStateActualizeForChange(pVCpu);
7156#endif
7157 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7158}
7159
7160
7161/**
7162 * Stores a QNaN value into a FPU register.
7163 *
7164 * @param pReg Pointer to the register.
7165 */
7166DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7167{
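    /* Sign bit set, all-ones exponent, quiet bit set and zero payload: the x87 "real indefinite" QNaN. */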
7168 pReg->au32[0] = UINT32_C(0x00000000);
7169 pReg->au32[1] = UINT32_C(0xc0000000);
7170 pReg->au16[4] = UINT16_C(0xffff);
7171}
7172
7173
7174/**
7175 * Updates the FOP, FPUCS and FPUIP registers.
7176 *
7177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7178 * @param pFpuCtx The FPU context.
7179 */
7180DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7181{
7182 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7183 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7184 /** @todo x87.CS and FPUIP need to be kept separately. */
7185 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7186 {
7187 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7188 * happens in real mode here based on the fnsave and fnstenv images. */
7189 pFpuCtx->CS = 0;
7190 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7191 }
7192 else
7193 {
7194 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7195 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7196 }
7197}
7198
7199
7200/**
7201 * Updates the x87.DS and FPUDP registers.
7202 *
7203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7204 * @param pFpuCtx The FPU context.
7205 * @param iEffSeg The effective segment register.
7206 * @param GCPtrEff The effective address relative to @a iEffSeg.
7207 */
7208DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7209{
7210 RTSEL sel;
7211 switch (iEffSeg)
7212 {
7213 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7214 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7215 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7216 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7217 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7218 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7219 default:
7220 AssertMsgFailed(("%d\n", iEffSeg));
7221 sel = pVCpu->cpum.GstCtx.ds.Sel;
7222 }
7223 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7224 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7225 {
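        /* Real/V86 mode: FPUDP holds the linear address (selector * 16 + offset) and DS is stored as zero. */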
7226 pFpuCtx->DS = 0;
7227 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7228 }
7229 else
7230 {
7231 pFpuCtx->DS = sel;
7232 pFpuCtx->FPUDP = GCPtrEff;
7233 }
7234}
7235
7236
7237/**
7238 * Rotates the stack registers in the push direction.
7239 *
7240 * @param pFpuCtx The FPU context.
7241 * @remarks This is a complete waste of time, but fxsave stores the registers in
7242 * stack order.
7243 */
7244DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7245{
7246 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7247 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7248 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7249 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7250 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7251 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7252 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7253 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7254 pFpuCtx->aRegs[0].r80 = r80Tmp;
7255}
7256
7257
7258/**
7259 * Rotates the stack registers in the pop direction.
7260 *
7261 * @param pFpuCtx The FPU context.
7262 * @remarks This is a complete waste of time, but fxsave stores the registers in
7263 * stack order.
7264 */
7265DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7266{
7267 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7268 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7269 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7270 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7271 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7272 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7273 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7274 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7275 pFpuCtx->aRegs[7].r80 = r80Tmp;
7276}
7277
7278
7279/**
7280 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7281 * exception prevents it.
7282 *
7283 * @param pResult The FPU operation result to push.
7284 * @param pFpuCtx The FPU context.
7285 */
7286IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7287{
7288 /* Update FSW and bail if there are pending exceptions afterwards. */
7289 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7290 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7291 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7292 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7293 {
7294 pFpuCtx->FSW = fFsw;
7295 return;
7296 }
7297
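    /* A push decrements TOP; adding 7 is equivalent to -1 modulo 8 within the 3-bit TOP field. */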
7298 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7299 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7300 {
7301 /* All is fine, push the actual value. */
7302 pFpuCtx->FTW |= RT_BIT(iNewTop);
7303 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7304 }
7305 else if (pFpuCtx->FCW & X86_FCW_IM)
7306 {
7307 /* Masked stack overflow, push QNaN. */
7308 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7309 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7310 }
7311 else
7312 {
7313 /* Raise stack overflow, don't push anything. */
7314 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7315 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7316 return;
7317 }
7318
7319 fFsw &= ~X86_FSW_TOP_MASK;
7320 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7321 pFpuCtx->FSW = fFsw;
7322
7323 iemFpuRotateStackPush(pFpuCtx);
7324}
7325
7326
7327/**
7328 * Stores a result in a FPU register and updates the FSW and FTW.
7329 *
7330 * @param pFpuCtx The FPU context.
7331 * @param pResult The result to store.
7332 * @param iStReg Which FPU register to store it in.
7333 */
7334IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7335{
7336 Assert(iStReg < 8);
7337 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7338 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7339 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7340 pFpuCtx->FTW |= RT_BIT(iReg);
7341 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7342}
7343
7344
7345/**
7346 * Only updates the FPU status word (FSW) with the result of the current
7347 * instruction.
7348 *
7349 * @param pFpuCtx The FPU context.
7350 * @param u16FSW The FSW output of the current instruction.
7351 */
7352IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7353{
7354 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7355 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7356}
7357
7358
7359/**
7360 * Pops one item off the FPU stack if no pending exception prevents it.
7361 *
7362 * @param pFpuCtx The FPU context.
7363 */
7364IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7365{
7366 /* Check pending exceptions. */
7367 uint16_t uFSW = pFpuCtx->FSW;
7368 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7369 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7370 return;
7371
7372 /* Pop: TOP++ (adding 9 below is equivalent to +1 modulo 8 within the 3-bit TOP field). */
7373 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7374 uFSW &= ~X86_FSW_TOP_MASK;
7375 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7376 pFpuCtx->FSW = uFSW;
7377
7378 /* Mark the previous ST0 as empty. */
7379 iOldTop >>= X86_FSW_TOP_SHIFT;
7380 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7381
7382 /* Rotate the registers. */
7383 iemFpuRotateStackPop(pFpuCtx);
7384}
7385
7386
7387/**
7388 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7389 *
7390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7391 * @param pResult The FPU operation result to push.
7392 */
7393IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7394{
7395 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7396 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7397 iemFpuMaybePushResult(pResult, pFpuCtx);
7398}
7399
7400
7401/**
7402 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7403 * and sets FPUDP and FPUDS.
7404 *
7405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7406 * @param pResult The FPU operation result to push.
7407 * @param iEffSeg The effective segment register.
7408 * @param GCPtrEff The effective address relative to @a iEffSeg.
7409 */
7410IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7411{
7412 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7413 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7414 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7415 iemFpuMaybePushResult(pResult, pFpuCtx);
7416}
7417
7418
7419/**
7420 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7421 * unless a pending exception prevents it.
7422 *
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 * @param pResult The FPU operation result to store and push.
7425 */
7426IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7427{
7428 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7429 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7430
7431 /* Update FSW and bail if there are pending exceptions afterwards. */
7432 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7433 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7434 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7435 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7436 {
7437 pFpuCtx->FSW = fFsw;
7438 return;
7439 }
7440
7441 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7442 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7443 {
7444 /* All is fine, push the actual value. */
7445 pFpuCtx->FTW |= RT_BIT(iNewTop);
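        /* aRegs[0] is the current ST(0), which result1 replaces; result2 goes to aRegs[7], which becomes the new ST(0) after the push rotation below. */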
7446 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7447 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7448 }
7449 else if (pFpuCtx->FCW & X86_FCW_IM)
7450 {
7451 /* Masked stack overflow, push QNaN. */
7452 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7453 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7454 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7455 }
7456 else
7457 {
7458 /* Raise stack overflow, don't push anything. */
7459 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7460 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7461 return;
7462 }
7463
7464 fFsw &= ~X86_FSW_TOP_MASK;
7465 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7466 pFpuCtx->FSW = fFsw;
7467
7468 iemFpuRotateStackPush(pFpuCtx);
7469}
7470
7471
7472/**
7473 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7474 * FOP.
7475 *
7476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7477 * @param pResult The result to store.
7478 * @param iStReg Which FPU register to store it in.
7479 */
7480IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7481{
7482 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7483 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7484 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7485}
7486
7487
7488/**
7489 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7490 * FOP, and then pops the stack.
7491 *
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 * @param pResult The result to store.
7494 * @param iStReg Which FPU register to store it in.
7495 */
7496IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7497{
7498 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7500 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7501 iemFpuMaybePopOne(pFpuCtx);
7502}
7503
7504
7505/**
7506 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7507 * FPUDP, and FPUDS.
7508 *
7509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7510 * @param pResult The result to store.
7511 * @param iStReg Which FPU register to store it in.
7512 * @param iEffSeg The effective memory operand selector register.
7513 * @param GCPtrEff The effective memory operand offset.
7514 */
7515IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7516 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7517{
7518 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7519 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7520 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7521 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7522}
7523
7524
7525/**
7526 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7527 * FPUDP, and FPUDS, and then pops the stack.
7528 *
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 * @param pResult The result to store.
7531 * @param iStReg Which FPU register to store it in.
7532 * @param iEffSeg The effective memory operand selector register.
7533 * @param GCPtrEff The effective memory operand offset.
7534 */
7535IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7536 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7537{
7538 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7539 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7540 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7541 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7542 iemFpuMaybePopOne(pFpuCtx);
7543}
7544
7545
7546/**
7547 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7548 *
7549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7550 */
7551IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7552{
7553 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7554 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7555}
7556
7557
7558/**
7559 * Marks the specified stack register as free (for FFREE).
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param iStReg The register to free.
7563 */
7564IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7565{
7566 Assert(iStReg < 8);
7567 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7568 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7569 pFpuCtx->FTW &= ~RT_BIT(iReg);
7570}
7571
7572
7573/**
7574 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7575 *
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 */
7578IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7579{
7580 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7581 uint16_t uFsw = pFpuCtx->FSW;
7582 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7583 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7584 uFsw &= ~X86_FSW_TOP_MASK;
7585 uFsw |= uTop;
7586 pFpuCtx->FSW = uFsw;
7587}
7588
7589
7590/**
7591 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7592 *
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 */
7595IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7596{
7597 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7598 uint16_t uFsw = pFpuCtx->FSW;
7599 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7600 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7601 uFsw &= ~X86_FSW_TOP_MASK;
7602 uFsw |= uTop;
7603 pFpuCtx->FSW = uFsw;
7604}
7605
7606
7607/**
7608 * Updates the FSW, FOP, FPUIP, and FPUCS.
7609 *
7610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7611 * @param u16FSW The FSW from the current instruction.
7612 */
7613IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7614{
7615 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7616 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7617 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7618}
7619
7620
7621/**
7622 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7623 *
7624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7625 * @param u16FSW The FSW from the current instruction.
7626 */
7627IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7628{
7629 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7630 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7631 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7632 iemFpuMaybePopOne(pFpuCtx);
7633}
7634
7635
7636/**
7637 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7638 *
7639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7640 * @param u16FSW The FSW from the current instruction.
7641 * @param iEffSeg The effective memory operand selector register.
7642 * @param GCPtrEff The effective memory operand offset.
7643 */
7644IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7645{
7646 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7647 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7648 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7649 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7650}
7651
7652
7653/**
7654 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7655 *
7656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7657 * @param u16FSW The FSW from the current instruction.
7658 */
7659IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7660{
7661 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7662 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7663 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7664 iemFpuMaybePopOne(pFpuCtx);
7665 iemFpuMaybePopOne(pFpuCtx);
7666}
7667
7668
7669/**
7670 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7671 *
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param u16FSW The FSW from the current instruction.
7674 * @param iEffSeg The effective memory operand selector register.
7675 * @param GCPtrEff The effective memory operand offset.
7676 */
7677IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7678{
7679 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7680 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7681 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7682 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7683 iemFpuMaybePopOne(pFpuCtx);
7684}
7685
7686
7687/**
7688 * Worker routine for raising an FPU stack underflow exception.
7689 *
7690 * @param pFpuCtx The FPU context.
7691 * @param iStReg The stack register being accessed.
7692 */
7693IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7694{
7695 Assert(iStReg < 8 || iStReg == UINT8_MAX);
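    /* iStReg == UINT8_MAX means no destination register receives a QNaN (e.g. for fcom-style instructions). */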
7696 if (pFpuCtx->FCW & X86_FCW_IM)
7697 {
7698 /* Masked underflow. */
7699 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7700 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7701 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7702 if (iStReg != UINT8_MAX)
7703 {
7704 pFpuCtx->FTW |= RT_BIT(iReg);
7705 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7706 }
7707 }
7708 else
7709 {
7710 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7711 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7712 }
7713}
7714
7715
7716/**
7717 * Raises a FPU stack underflow exception.
7718 *
7719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7720 * @param iStReg The destination register that should be loaded
7721 * with QNaN if \#IS is not masked. Specify
7722 * UINT8_MAX if none (like for fcom).
7723 */
7724DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7725{
7726 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7727 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7728 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7729}
7730
7731
7732DECL_NO_INLINE(IEM_STATIC, void)
7733iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7734{
7735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7736 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7737 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7738 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7739}
7740
7741
7742DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7743{
7744 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7745 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7746 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7747 iemFpuMaybePopOne(pFpuCtx);
7748}
7749
7750
7751DECL_NO_INLINE(IEM_STATIC, void)
7752iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7753{
7754 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7755 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7756 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7757 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7758 iemFpuMaybePopOne(pFpuCtx);
7759}
7760
7761
7762DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7763{
7764 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7765 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7766 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7767 iemFpuMaybePopOne(pFpuCtx);
7768 iemFpuMaybePopOne(pFpuCtx);
7769}
7770
7771
7772DECL_NO_INLINE(IEM_STATIC, void)
7773iemFpuStackPushUnderflow(PVMCPU pVCpu)
7774{
7775 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7776 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7777
7778 if (pFpuCtx->FCW & X86_FCW_IM)
7779 {
7780 /* Masked underflow - Push QNaN. */
7781 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7782 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7783 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7784 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7785 pFpuCtx->FTW |= RT_BIT(iNewTop);
7786 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7787 iemFpuRotateStackPush(pFpuCtx);
7788 }
7789 else
7790 {
7791 /* Exception pending - don't change TOP or the register stack. */
7792 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7793 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7794 }
7795}
7796
7797
7798DECL_NO_INLINE(IEM_STATIC, void)
7799iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7800{
7801 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7802 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7803
7804 if (pFpuCtx->FCW & X86_FCW_IM)
7805 {
7806 /* Masked underflow - Push QNaN. */
7807 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7808 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7809 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7810 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7811 pFpuCtx->FTW |= RT_BIT(iNewTop);
7812 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7813 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7814 iemFpuRotateStackPush(pFpuCtx);
7815 }
7816 else
7817 {
7818 /* Exception pending - don't change TOP or the register stack. */
7819 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7820 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7821 }
7822}
7823
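/*
 * The "(TOP + 7) & X86_FSW_TOP_SMASK" expression used by the push paths above
 * is simply a branch-free decrement of TOP modulo 8 (e.g. TOP=0 wraps to 7),
 * since a push makes the stack grow downwards through the eight physical
 * registers.  A minimal standalone restatement with raw shifts, illustrative
 * only:
 */
static uint16_t sketchFswDecrementTop(uint16_t fsw)
{
    uint16_t const iNewTop = (uint16_t)((((fsw >> 11) & 7) + 7) & 7); /* TOP - 1 modulo 8 */
    fsw &= ~(uint16_t)(7 << 11);                                      /* clear the old TOP field */
    fsw |= (uint16_t)(iNewTop << 11);                                 /* install the new TOP */
    return fsw;
}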
7824
7825/**
7826 * Worker routine for raising an FPU stack overflow exception on a push.
7827 *
7828 * @param pFpuCtx The FPU context.
7829 */
7830IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7831{
7832 if (pFpuCtx->FCW & X86_FCW_IM)
7833 {
7834 /* Masked overflow. */
7835 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7836 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7837 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7838 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7839 pFpuCtx->FTW |= RT_BIT(iNewTop);
7840 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7841 iemFpuRotateStackPush(pFpuCtx);
7842 }
7843 else
7844 {
7845 /* Exception pending - don't change TOP or the register stack. */
7846 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7847 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7848 }
7849}
7850
7851
7852/**
7853 * Raises an FPU stack overflow exception on a push.
7854 *
7855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7856 */
7857DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7858{
7859 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7860 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7861 iemFpuStackPushOverflowOnly(pFpuCtx);
7862}
7863
7864
7865/**
7866 * Raises a FPU stack overflow exception on a push with a memory operand.
7867 *
7868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7869 * @param iEffSeg The effective memory operand selector register.
7870 * @param GCPtrEff The effective memory operand offset.
7871 */
7872DECL_NO_INLINE(IEM_STATIC, void)
7873iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7874{
7875 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7876 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7877 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7878 iemFpuStackPushOverflowOnly(pFpuCtx);
7879}
7880
7881
7882IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7883{
7884 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7885 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7886 if (pFpuCtx->FTW & RT_BIT(iReg))
7887 return VINF_SUCCESS;
7888 return VERR_NOT_FOUND;
7889}
7890
7891
7892IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7893{
7894 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7895 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7896 if (pFpuCtx->FTW & RT_BIT(iReg))
7897 {
7898 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7899 return VINF_SUCCESS;
7900 }
7901 return VERR_NOT_FOUND;
7902}
7903
7904
7905IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7906 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7907{
7908 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7909 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7910 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7911 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7912 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7913 {
7914 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7915 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7916 return VINF_SUCCESS;
7917 }
7918 return VERR_NOT_FOUND;
7919}
7920
7921
7922IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7923{
7924 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7925 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7926 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7927 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7928 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7929 {
7930 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7931 return VINF_SUCCESS;
7932 }
7933 return VERR_NOT_FOUND;
7934}
7935
7936
7937/**
7938 * Updates the FPU exception status after FCW is changed.
7939 *
7940 * @param pFpuCtx The FPU context.
7941 */
7942IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7943{
7944 uint16_t u16Fsw = pFpuCtx->FSW;
7945 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7946 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7947 else
7948 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7949 pFpuCtx->FSW = u16Fsw;
7950}
7951
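/*
 * The rule implemented above: ES (and the legacy B bit) must be set exactly
 * when some exception flag is set in FSW whose mask bit is clear in FCW.  A
 * standalone restatement using a simplified raw bit layout (exception flags
 * in bits 0-5, ES = bit 7, B = bit 15); names and constants are for this
 * sketch only.
 */
static uint16_t sketchRecalcEsAndB(uint16_t fsw, uint16_t fcw)
{
    uint16_t const fUnmasked = (uint16_t)((fsw & 0x3f) & ~(fcw & 0x3f));
    if (fUnmasked)
        fsw |= (uint16_t)(0x0080 /*ES*/ | 0x8000 /*B*/);
    else
        fsw &= (uint16_t)~(0x0080 | 0x8000);
    return fsw;
}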
7952
7953/**
7954 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7955 *
7956 * @returns The full FTW.
7957 * @param pFpuCtx The FPU context.
7958 */
7959IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7960{
7961 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7962 uint16_t u16Ftw = 0;
7963 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7964 for (unsigned iSt = 0; iSt < 8; iSt++)
7965 {
7966 unsigned const iReg = (iSt + iTop) & 7;
7967 if (!(u8Ftw & RT_BIT(iReg)))
7968 u16Ftw |= 3 << (iReg * 2); /* empty */
7969 else
7970 {
7971 uint16_t uTag;
7972 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7973 if (pr80Reg->s.uExponent == 0x7fff)
7974 uTag = 2; /* Exponent is all 1's => Special. */
7975 else if (pr80Reg->s.uExponent == 0x0000)
7976 {
7977 if (pr80Reg->s.u64Mantissa == 0x0000)
7978 uTag = 1; /* All bits are zero => Zero. */
7979 else
7980 uTag = 2; /* Must be special. */
7981 }
7982 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7983 uTag = 0; /* Valid. */
7984 else
7985 uTag = 2; /* Must be special. */
7986
7987 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7988 }
7989 }
7990
7991 return u16Ftw;
7992}
7993
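/*
 * The 2-bit tags produced above follow the usual x87 encoding: 0 = valid,
 * 1 = zero, 2 = special (NaN, infinity, denormal, unnormal), 3 = empty.
 * A standalone classifier over a raw exponent/mantissa pair mirroring the
 * decision tree above; the function name is made up for illustration.
 */
static uint16_t sketchX87TagForValue(uint16_t uExponent, uint64_t u64Mantissa, bool fEmpty)
{
    if (fEmpty)
        return 3;                                      /* empty */
    if (uExponent == 0x7fff)
        return 2;                                      /* all-ones exponent: NaN/infinity => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;               /* zero, or (pseudo-)denormal => special */
    return (u64Mantissa & RT_BIT_64(63)) ? 0 : 2;      /* J bit set => valid, otherwise unnormal => special */
}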
7994
7995/**
7996 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7997 *
7998 * @returns The compressed FTW.
7999 * @param u16FullFtw The full FTW to convert.
8000 */
8001IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
8002{
8003 uint8_t u8Ftw = 0;
8004 for (unsigned i = 0; i < 8; i++)
8005 {
8006 if ((u16FullFtw & 3) != 3 /*empty*/)
8007 u8Ftw |= RT_BIT(i);
8008 u16FullFtw >>= 2;
8009 }
8010
8011 return u8Ftw;
8012}
8013
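/*
 * Compression is deliberately lossy: valid (0), zero (1) and special (2) all
 * collapse to a set "used" bit, only empty (3) clears it.  A small usage
 * sketch of the helper above (the constants are just an example):
 */
static void sketchCompressFtwExample(void)
{
    /* Physical reg 0 valid (00), reg 1 zero (01), regs 2-7 empty (11). */
    uint16_t const u16FullFtw = 0xfff4;
    uint8_t const u8Ftw = iemFpuCompressFtw(u16FullFtw);
    Assert(u8Ftw == 0x03);  /* only registers 0 and 1 remain marked as used */
    NOREF(u8Ftw);
}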
8014/** @} */
8015
8016
8017/** @name Memory access.
8018 *
8019 * @{
8020 */
8021
8022
8023/**
8024 * Updates the IEMCPU::cbWritten counter if applicable.
8025 *
8026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8027 * @param fAccess The access being accounted for.
8028 * @param cbMem The access size.
8029 */
8030DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8031{
8032 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8033 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8034 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8035}
8036
8037
8038/**
8039 * Checks if the given segment can be written to, raising the appropriate
8040 * exception if not.
8041 *
8042 * @returns VBox strict status code.
8043 *
8044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8045 * @param pHid Pointer to the hidden register.
8046 * @param iSegReg The register number.
8047 * @param pu64BaseAddr Where to return the base address to use for the
8048 * segment. (In 64-bit code it may differ from the
8049 * base in the hidden segment.)
8050 */
8051IEM_STATIC VBOXSTRICTRC
8052iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8053{
8054 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8055
8056 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8057 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8058 else
8059 {
8060 if (!pHid->Attr.n.u1Present)
8061 {
8062 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8063 AssertRelease(uSel == 0);
8064 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8065 return iemRaiseGeneralProtectionFault0(pVCpu);
8066 }
8067
8068 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8069 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8070 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8071 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8072 *pu64BaseAddr = pHid->u64Base;
8073 }
8074 return VINF_SUCCESS;
8075}
8076
8077
8078/**
8079 * Checks if the given segment can be read from, raising the appropriate
8080 * exception if not.
8081 *
8082 * @returns VBox strict status code.
8083 *
8084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8085 * @param pHid Pointer to the hidden register.
8086 * @param iSegReg The register number.
8087 * @param pu64BaseAddr Where to return the base address to use for the
8088 * segment. (In 64-bit code it may differ from the
8089 * base in the hidden segment.)
8090 */
8091IEM_STATIC VBOXSTRICTRC
8092iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8093{
8094 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8095
8096 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8097 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8098 else
8099 {
8100 if (!pHid->Attr.n.u1Present)
8101 {
8102 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8103 AssertRelease(uSel == 0);
8104 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8105 return iemRaiseGeneralProtectionFault0(pVCpu);
8106 }
8107
8108 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8109 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8110 *pu64BaseAddr = pHid->u64Base;
8111 }
8112 return VINF_SUCCESS;
8113}
8114
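/*
 * The descriptor attribute tests in the two helpers above reduce to two small
 * predicates on the 4-bit type field.  A standalone restatement using the
 * same X86_SEL_TYPE_XXX bits as above; the function names are invented for
 * this sketch.
 */
static bool sketchSegTypeIsWritable(uint8_t u4Type)
{
    /* Only data (non-code) segments with the write bit set may be written. */
    return !(u4Type & X86_SEL_TYPE_CODE) && (u4Type & X86_SEL_TYPE_WRITE);
}

static bool sketchSegTypeIsReadable(uint8_t u4Type)
{
    /* Data segments are always readable; code segments only when the read bit is set. */
    return !(u4Type & X86_SEL_TYPE_CODE) || (u4Type & X86_SEL_TYPE_READ);
}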
8115
8116/**
8117 * Applies the segment limit, base and attributes.
8118 *
8119 * This may raise a \#GP or \#SS.
8120 *
8121 * @returns VBox strict status code.
8122 *
8123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8124 * @param fAccess The kind of access which is being performed.
8125 * @param iSegReg The index of the segment register to apply.
8126 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8127 * TSS, ++).
8128 * @param cbMem The access size.
8129 * @param pGCPtrMem Pointer to the guest memory address to apply
8130 * segmentation to. Input and output parameter.
8131 */
8132IEM_STATIC VBOXSTRICTRC
8133iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8134{
8135 if (iSegReg == UINT8_MAX)
8136 return VINF_SUCCESS;
8137
8138 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8139 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8140 switch (pVCpu->iem.s.enmCpuMode)
8141 {
8142 case IEMMODE_16BIT:
8143 case IEMMODE_32BIT:
8144 {
8145 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8146 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8147
8148 if ( pSel->Attr.n.u1Present
8149 && !pSel->Attr.n.u1Unusable)
8150 {
8151 Assert(pSel->Attr.n.u1DescType);
8152 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8153 {
8154 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8155 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8156 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8157
8158 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8159 {
8160 /** @todo CPL check. */
8161 }
8162
8163 /*
8164 * There are two kinds of data selectors, normal and expand down.
8165 */
8166 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8167 {
8168 if ( GCPtrFirst32 > pSel->u32Limit
8169 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8170 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8171 }
8172 else
8173 {
8174 /*
8175 * The upper boundary is defined by the B bit, not the G bit!
8176 */
8177 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8178 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8179 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8180 }
8181 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8182 }
8183 else
8184 {
8185
8186 /*
8187 * Code selectors can usually be used to read through; writing is
8188 * only permitted in real and V8086 mode.
8189 */
8190 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8191 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8192 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8193 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8194 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8195
8196 if ( GCPtrFirst32 > pSel->u32Limit
8197 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8198 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8199
8200 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8201 {
8202 /** @todo CPL check. */
8203 }
8204
8205 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8206 }
8207 }
8208 else
8209 return iemRaiseGeneralProtectionFault0(pVCpu);
8210 return VINF_SUCCESS;
8211 }
8212
8213 case IEMMODE_64BIT:
8214 {
8215 RTGCPTR GCPtrMem = *pGCPtrMem;
8216 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8217 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8218
8219 Assert(cbMem >= 1);
8220 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8221 return VINF_SUCCESS;
8222 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8223 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8224 return iemRaiseGeneralProtectionFault0(pVCpu);
8225 }
8226
8227 default:
8228 AssertFailedReturn(VERR_IEM_IPE_7);
8229 }
8230}
8231
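/*
 * The 16/32-bit limit checks above, condensed: an expand-up segment allows
 * accesses in [0, limit], while an expand-down data segment allows accesses
 * strictly above the limit and below 64K or 4G depending on the B bit.
 * Standalone restatement (assumes offLast did not wrap); names invented for
 * this sketch.
 */
static bool sketchSegLimitCheck(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;         /* expand-up: stay within the limit */
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpperBound;         /* expand-down: above limit, below B-bit bound */
}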
8232
8233/**
8234 * Translates a virtual address to a physical address and checks if we
8235 * can access the page as specified.
8236 *
8237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8238 * @param GCPtrMem The virtual address.
8239 * @param fAccess The intended access.
8240 * @param pGCPhysMem Where to return the physical address.
8241 */
8242IEM_STATIC VBOXSTRICTRC
8243iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8244{
8245 /** @todo Need a different PGM interface here. We're currently using
8246 * generic / REM interfaces. this won't cut it for R0 & RC. */
8247 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8248 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8249 RTGCPHYS GCPhys;
8250 uint64_t fFlags;
8251 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8252 if (RT_FAILURE(rc))
8253 {
8254 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8255 /** @todo Check unassigned memory in unpaged mode. */
8256 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8257 *pGCPhysMem = NIL_RTGCPHYS;
8258 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8259 }
8260
8261 /* If the page is writable and does not have the no-exec bit set, all
8262 access is allowed. Otherwise we'll have to check more carefully... */
8263 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8264 {
8265 /* Write to read only memory? */
8266 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8267 && !(fFlags & X86_PTE_RW)
8268 && ( (pVCpu->iem.s.uCpl == 3
8269 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8270 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8271 {
8272 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8273 *pGCPhysMem = NIL_RTGCPHYS;
8274 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8275 }
8276
8277 /* Kernel memory accessed by userland? */
8278 if ( !(fFlags & X86_PTE_US)
8279 && pVCpu->iem.s.uCpl == 3
8280 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8281 {
8282 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8283 *pGCPhysMem = NIL_RTGCPHYS;
8284 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8285 }
8286
8287 /* Executing non-executable memory? */
8288 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8289 && (fFlags & X86_PTE_PAE_NX)
8290 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8291 {
8292 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8293 *pGCPhysMem = NIL_RTGCPHYS;
8294 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8295 VERR_ACCESS_DENIED);
8296 }
8297 }
8298
8299 /*
8300 * Set the dirty / access flags.
8301 * ASSUMES this is set when the address is translated rather than on commit...
8302 */
8303 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8304 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8305 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8306 {
8307 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8308 AssertRC(rc2);
8309 }
8310
8311 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8312 *pGCPhysMem = GCPhys;
8313 return VINF_SUCCESS;
8314}
8315
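/*
 * The three paging permission checks above collapse into one predicate.  The
 * parameters stand for the PTE R/W, U/S and NX bits, CR0.WP and EFER.NXE,
 * and fUserAccess stands for "CPL 3 and not an IEM_ACCESS_WHAT_SYS access";
 * everything here is named for this sketch only.
 */
static bool sketchPageAccessPermitted(bool fWrite, bool fExec, bool fUserAccess,
                                      bool fPteWritable, bool fPteUser, bool fPteNoExec,
                                      bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !fPteWritable && (fUserAccess || fCr0Wp))
        return false;   /* write to a read-only page */
    if (fUserAccess && !fPteUser)
        return false;   /* user-mode access to a supervisor page */
    if (fExec && fPteNoExec && fEferNxe)
        return false;   /* instruction fetch from a no-execute page */
    return true;        /* access is fine as far as paging is concerned */
}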
8316
8317
8318/**
8319 * Maps a physical page.
8320 *
8321 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8323 * @param GCPhysMem The physical address.
8324 * @param fAccess The intended access.
8325 * @param ppvMem Where to return the mapping address.
8326 * @param pLock The PGM lock.
8327 */
8328IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8329{
8330#ifdef IEM_LOG_MEMORY_WRITES
8331 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8332 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8333#endif
8334
8335 /** @todo This API may require some improvement later. A private deal with PGM
8336 * regarding locking and unlocking needs to be struck. A couple of TLBs
8337 * living in PGM, but with publicly accessible inlined access methods
8338 * could perhaps be an even better solution. */
8339 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8340 GCPhysMem,
8341 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8342 pVCpu->iem.s.fBypassHandlers,
8343 ppvMem,
8344 pLock);
8345 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8346 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8347
8348 return rc;
8349}
8350
8351
8352/**
8353 * Unmaps a page previously mapped by iemMemPageMap.
8354 *
8355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8356 * @param GCPhysMem The physical address.
8357 * @param fAccess The intended access.
8358 * @param pvMem What iemMemPageMap returned.
8359 * @param pLock The PGM lock.
8360 */
8361DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8362{
8363 NOREF(pVCpu);
8364 NOREF(GCPhysMem);
8365 NOREF(fAccess);
8366 NOREF(pvMem);
8367 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8368}
8369
8370
8371/**
8372 * Looks up a memory mapping entry.
8373 *
8374 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8376 * @param pvMem The memory address.
8377 * @param fAccess The access to look up.
8378 */
8379DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8380{
8381 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8382 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8383 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8384 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8385 return 0;
8386 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8387 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8388 return 1;
8389 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8390 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8391 return 2;
8392 return VERR_NOT_FOUND;
8393}
8394
8395
8396/**
8397 * Finds a free memmap entry when using iNextMapping doesn't work.
8398 *
8399 * @returns Memory mapping index, 1024 on failure.
8400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8401 */
8402IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8403{
8404 /*
8405 * The easy case.
8406 */
8407 if (pVCpu->iem.s.cActiveMappings == 0)
8408 {
8409 pVCpu->iem.s.iNextMapping = 1;
8410 return 0;
8411 }
8412
8413 /* There should be enough mappings for all instructions. */
8414 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8415
8416 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8417 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8418 return i;
8419
8420 AssertFailedReturn(1024);
8421}
8422
8423
8424/**
8425 * Commits a bounce buffer that needs writing back and unmaps it.
8426 *
8427 * @returns Strict VBox status code.
8428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8429 * @param iMemMap The index of the buffer to commit.
8430 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8431 * Always false in ring-3, obviously.
8432 */
8433IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8434{
8435 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8436 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8437#ifdef IN_RING3
8438 Assert(!fPostponeFail);
8439 RT_NOREF_PV(fPostponeFail);
8440#endif
8441
8442 /*
8443 * Do the writing.
8444 */
8445 PVM pVM = pVCpu->CTX_SUFF(pVM);
8446 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8447 {
8448 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8449 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8450 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8451 if (!pVCpu->iem.s.fBypassHandlers)
8452 {
8453 /*
8454 * Carefully and efficiently dealing with access handler return
8455 * codes makes this a little bloated.
8456 */
8457 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8459 pbBuf,
8460 cbFirst,
8461 PGMACCESSORIGIN_IEM);
8462 if (rcStrict == VINF_SUCCESS)
8463 {
8464 if (cbSecond)
8465 {
8466 rcStrict = PGMPhysWrite(pVM,
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8468 pbBuf + cbFirst,
8469 cbSecond,
8470 PGMACCESSORIGIN_IEM);
8471 if (rcStrict == VINF_SUCCESS)
8472 { /* nothing */ }
8473 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8474 {
8475 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8478 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8479 }
8480#ifndef IN_RING3
8481 else if (fPostponeFail)
8482 {
8483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8486 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8487 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8488 return iemSetPassUpStatus(pVCpu, rcStrict);
8489 }
8490#endif
8491 else
8492 {
8493 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8495 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8496 return rcStrict;
8497 }
8498 }
8499 }
8500 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8501 {
8502 if (!cbSecond)
8503 {
8504 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8506 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8507 }
8508 else
8509 {
8510 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8511 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8512 pbBuf + cbFirst,
8513 cbSecond,
8514 PGMACCESSORIGIN_IEM);
8515 if (rcStrict2 == VINF_SUCCESS)
8516 {
8517 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8520 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8521 }
8522 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8523 {
8524 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8527 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8528 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8529 }
8530#ifndef IN_RING3
8531 else if (fPostponeFail)
8532 {
8533 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8534 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8536 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8537 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8538 return iemSetPassUpStatus(pVCpu, rcStrict);
8539 }
8540#endif
8541 else
8542 {
8543 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8546 return rcStrict2;
8547 }
8548 }
8549 }
8550#ifndef IN_RING3
8551 else if (fPostponeFail)
8552 {
8553 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8555 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8556 if (!cbSecond)
8557 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8558 else
8559 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8560 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8561 return iemSetPassUpStatus(pVCpu, rcStrict);
8562 }
8563#endif
8564 else
8565 {
8566 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8567 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8568 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8569 return rcStrict;
8570 }
8571 }
8572 else
8573 {
8574 /*
8575 * No access handlers, much simpler.
8576 */
8577 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8578 if (RT_SUCCESS(rc))
8579 {
8580 if (cbSecond)
8581 {
8582 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8583 if (RT_SUCCESS(rc))
8584 { /* likely */ }
8585 else
8586 {
8587 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8588 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8590 return rc;
8591 }
8592 }
8593 }
8594 else
8595 {
8596 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8597 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8598 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8599 return rc;
8600 }
8601 }
8602 }
8603
8604#if defined(IEM_LOG_MEMORY_WRITES)
8605 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8606 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8607 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8608 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8609 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8610 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8611
8612 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8613 g_cbIemWrote = cbWrote;
8614 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8615#endif
8616
8617 /*
8618 * Free the mapping entry.
8619 */
8620 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8621 Assert(pVCpu->iem.s.cActiveMappings != 0);
8622 pVCpu->iem.s.cActiveMappings--;
8623 return VINF_SUCCESS;
8624}
8625
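/*
 * Stripped of the access-handler status code plumbing, the write-back above
 * is just "write cbFirst bytes at GCPhysFirst, then cbSecond bytes at
 * GCPhysSecond".  A sketch of that core with a made-up writer callback so it
 * stays self-contained; none of the names below are IEM or PGM APIs.
 */
typedef int FNSKETCHPHYSWRITER(RTGCPHYS GCPhys, const void *pvBuf, size_t cbBuf);

static int sketchBounceBufferWriteBack(FNSKETCHPHYSWRITER *pfnWrite, const uint8_t *pbBuf,
                                       RTGCPHYS GCPhysFirst, uint16_t cbFirst,
                                       RTGCPHYS GCPhysSecond, uint16_t cbSecond)
{
    int rc = pfnWrite(GCPhysFirst, pbBuf, cbFirst);             /* first (or only) physical range */
    if (RT_SUCCESS(rc) && cbSecond)
        rc = pfnWrite(GCPhysSecond, pbBuf + cbFirst, cbSecond); /* spill-over into the second range */
    return rc;
}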
8626
8627/**
8628 * iemMemMap worker that deals with a request crossing pages.
8629 */
8630IEM_STATIC VBOXSTRICTRC
8631iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8632{
8633 /*
8634 * Do the address translations.
8635 */
8636 RTGCPHYS GCPhysFirst;
8637 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8638 if (rcStrict != VINF_SUCCESS)
8639 return rcStrict;
8640
8641 RTGCPHYS GCPhysSecond;
8642 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8643 fAccess, &GCPhysSecond);
8644 if (rcStrict != VINF_SUCCESS)
8645 return rcStrict;
8646 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8647
8648 PVM pVM = pVCpu->CTX_SUFF(pVM);
8649
8650 /*
8651 * Read in the current memory content if it's a read, execute or partial
8652 * write access.
8653 */
8654 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8655 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8656 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8657
8658 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8659 {
8660 if (!pVCpu->iem.s.fBypassHandlers)
8661 {
8662 /*
8663 * Must carefully deal with access handler status codes here;
8664 * it makes the code a bit bloated.
8665 */
8666 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8667 if (rcStrict == VINF_SUCCESS)
8668 {
8669 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8670 if (rcStrict == VINF_SUCCESS)
8671 { /*likely */ }
8672 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8673 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8674 else
8675 {
8676 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8677 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8678 return rcStrict;
8679 }
8680 }
8681 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8682 {
8683 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8684 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8685 {
8686 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8687 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8688 }
8689 else
8690 {
8691 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8692 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8693 return rcStrict2;
8694 }
8695 }
8696 else
8697 {
8698 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8699 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8700 return rcStrict;
8701 }
8702 }
8703 else
8704 {
8705 /*
8706 * No informational status codes here, much more straightforward.
8707 */
8708 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8709 if (RT_SUCCESS(rc))
8710 {
8711 Assert(rc == VINF_SUCCESS);
8712 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8713 if (RT_SUCCESS(rc))
8714 Assert(rc == VINF_SUCCESS);
8715 else
8716 {
8717 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8718 return rc;
8719 }
8720 }
8721 else
8722 {
8723 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8724 return rc;
8725 }
8726 }
8727 }
8728#ifdef VBOX_STRICT
8729 else
8730 memset(pbBuf, 0xcc, cbMem);
8731 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8732 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8733#endif
8734
8735 /*
8736 * Commit the bounce buffer entry.
8737 */
8738 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8740 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8741 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8742 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8743 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8744 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8745 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8746 pVCpu->iem.s.cActiveMappings++;
8747
8748 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8749 *ppvMem = pbBuf;
8750 return VINF_SUCCESS;
8751}
8752
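/*
 * How the cross-page split above carves up an access: the first chunk runs
 * from the access offset to the end of its page, the remainder starts at the
 * top of the following page.  A standalone restatement of just the size
 * arithmetic (caller guarantees the access really crosses a page boundary);
 * the function name is invented for this sketch.
 */
static void sketchCrossPageSplit(RTGCPTR GCPtrFirst, size_t cbMem,
                                 uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    uint32_t const cbFirstPage = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);
    *pcbFirstPage  = cbFirstPage;                     /* bytes up to the end of the first page */
    *pcbSecondPage = (uint32_t)(cbMem - cbFirstPage); /* bytes landing at the start of the next page */
}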
8753
8754/**
8755 * iemMemMap worker that deals with iemMemPageMap failures.
8756 */
8757IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8758 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8759{
8760 /*
8761 * Filter out conditions we can handle and the ones which shouldn't happen.
8762 */
8763 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8764 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8765 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8766 {
8767 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8768 return rcMap;
8769 }
8770 pVCpu->iem.s.cPotentialExits++;
8771
8772 /*
8773 * Read in the current memory content if it's a read, execute or partial
8774 * write access.
8775 */
8776 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8777 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8778 {
8779 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8780 memset(pbBuf, 0xff, cbMem);
8781 else
8782 {
8783 int rc;
8784 if (!pVCpu->iem.s.fBypassHandlers)
8785 {
8786 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8787 if (rcStrict == VINF_SUCCESS)
8788 { /* nothing */ }
8789 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8790 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8791 else
8792 {
8793 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8794 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8795 return rcStrict;
8796 }
8797 }
8798 else
8799 {
8800 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8801 if (RT_SUCCESS(rc))
8802 { /* likely */ }
8803 else
8804 {
8805 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8806 GCPhysFirst, rc));
8807 return rc;
8808 }
8809 }
8810 }
8811 }
8812#ifdef VBOX_STRICT
8813 else
8814 memset(pbBuf, 0xcc, cbMem);
8817 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8818 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8819#endif
8820
8821 /*
8822 * Commit the bounce buffer entry.
8823 */
8824 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8825 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8826 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8827 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8828 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8829 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8830 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8831 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8832 pVCpu->iem.s.cActiveMappings++;
8833
8834 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8835 *ppvMem = pbBuf;
8836 return VINF_SUCCESS;
8837}
8838
8839
8840
8841/**
8842 * Maps the specified guest memory for the given kind of access.
8843 *
8844 * This may be using bounce buffering of the memory if it's crossing a page
8845 * boundary or if there is an access handler installed for any of it. Because
8846 * of lock prefix guarantees, we're in for some extra clutter when this
8847 * happens.
8848 *
8849 * This may raise a \#GP, \#SS, \#PF or \#AC.
8850 *
8851 * @returns VBox strict status code.
8852 *
8853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8854 * @param ppvMem Where to return the pointer to the mapped
8855 * memory.
8856 * @param cbMem The number of bytes to map. This is usually 1,
8857 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8858 * string operations it can be up to a page.
8859 * @param iSegReg The index of the segment register to use for
8860 * this access. The base and limits are checked.
8861 * Use UINT8_MAX to indicate that no segmentation
8862 * is required (for IDT, GDT and LDT accesses).
8863 * @param GCPtrMem The address of the guest memory.
8864 * @param fAccess How the memory is being accessed. The
8865 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8866 * how to map the memory, while the
8867 * IEM_ACCESS_WHAT_XXX bit is used when raising
8868 * exceptions.
8869 */
8870IEM_STATIC VBOXSTRICTRC
8871iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8872{
8873 /*
8874 * Check the input and figure out which mapping entry to use.
8875 */
8876 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8877 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8878 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8879
8880 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8881 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8882 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8883 {
8884 iMemMap = iemMemMapFindFree(pVCpu);
8885 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8886 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8887 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8888 pVCpu->iem.s.aMemMappings[2].fAccess),
8889 VERR_IEM_IPE_9);
8890 }
8891
8892 /*
8893 * Map the memory, checking that we can actually access it. If something
8894 * slightly complicated happens, fall back on bounce buffering.
8895 */
8896 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8897 if (rcStrict != VINF_SUCCESS)
8898 return rcStrict;
8899
8900 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8901 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8902
8903 RTGCPHYS GCPhysFirst;
8904 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8905 if (rcStrict != VINF_SUCCESS)
8906 return rcStrict;
8907
8908 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8909 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8910 if (fAccess & IEM_ACCESS_TYPE_READ)
8911 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8912
8913 void *pvMem;
8914 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8915 if (rcStrict != VINF_SUCCESS)
8916 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8917
8918 /*
8919 * Fill in the mapping table entry.
8920 */
8921 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8922 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8923 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8924 pVCpu->iem.s.cActiveMappings++;
8925
8926 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8927 *ppvMem = pvMem;
8928
8929 return VINF_SUCCESS;
8930}
8931
8932
8933/**
8934 * Commits the guest memory if bounce buffered and unmaps it.
8935 *
8936 * @returns Strict VBox status code.
8937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8938 * @param pvMem The mapping.
8939 * @param fAccess The kind of access.
8940 */
8941IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8942{
8943 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8944 AssertReturn(iMemMap >= 0, iMemMap);
8945
8946 /* If it's bounce buffered, we may need to write back the buffer. */
8947 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8948 {
8949 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8950 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8951 }
8952 /* Otherwise unlock it. */
8953 else
8954 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8955
8956 /* Free the entry. */
8957 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8958 Assert(pVCpu->iem.s.cActiveMappings != 0);
8959 pVCpu->iem.s.cActiveMappings--;
8960 return VINF_SUCCESS;
8961}
8962
8963#ifdef IEM_WITH_SETJMP
8964
8965/**
8966 * Maps the specified guest memory for the given kind of access, longjmp on
8967 * error.
8968 *
8969 * This may be using bounce buffering of the memory if it's crossing a page
8970 * boundary or if there is an access handler installed for any of it. Because
8971 * of lock prefix guarantees, we're in for some extra clutter when this
8972 * happens.
8973 *
8974 * This may raise a \#GP, \#SS, \#PF or \#AC.
8975 *
8976 * @returns Pointer to the mapped memory.
8977 *
8978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8979 * @param cbMem The number of bytes to map. This is usually 1,
8980 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8981 * string operations it can be up to a page.
8982 * @param iSegReg The index of the segment register to use for
8983 * this access. The base and limits are checked.
8984 * Use UINT8_MAX to indicate that no segmentation
8985 * is required (for IDT, GDT and LDT accesses).
8986 * @param GCPtrMem The address of the guest memory.
8987 * @param fAccess How the memory is being accessed. The
8988 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8989 * how to map the memory, while the
8990 * IEM_ACCESS_WHAT_XXX bit is used when raising
8991 * exceptions.
8992 */
8993IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8994{
8995 /*
8996 * Check the input and figure out which mapping entry to use.
8997 */
8998 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8999 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9000 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9001
9002 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9003 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9004 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9005 {
9006 iMemMap = iemMemMapFindFree(pVCpu);
9007 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9008 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9009 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9010 pVCpu->iem.s.aMemMappings[2].fAccess),
9011 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9012 }
9013
9014 /*
9015 * Map the memory, checking that we can actually access it. If something
9016 * slightly complicated happens, fall back on bounce buffering.
9017 */
9018 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9019 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9020 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9021
9022 /* Crossing a page boundary? */
9023 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9024 { /* No (likely). */ }
9025 else
9026 {
9027 void *pvMem;
9028 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9029 if (rcStrict == VINF_SUCCESS)
9030 return pvMem;
9031 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9032 }
9033
9034 RTGCPHYS GCPhysFirst;
9035 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9036 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9037 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9038
9039 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9040 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9041 if (fAccess & IEM_ACCESS_TYPE_READ)
9042 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9043
9044 void *pvMem;
9045 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9046 if (rcStrict == VINF_SUCCESS)
9047 { /* likely */ }
9048 else
9049 {
9050 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9051 if (rcStrict == VINF_SUCCESS)
9052 return pvMem;
9053 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9054 }
9055
9056 /*
9057 * Fill in the mapping table entry.
9058 */
9059 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9060 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9061 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9062 pVCpu->iem.s.cActiveMappings++;
9063
9064 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9065 return pvMem;
9066}
9067
9068
9069/**
9070 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9071 *
9072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9073 * @param pvMem The mapping.
9074 * @param fAccess The kind of access.
9075 */
9076IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9077{
9078 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9079 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9080
9081 /* If it's bounce buffered, we may need to write back the buffer. */
9082 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9083 {
9084 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9085 {
9086 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9087 if (rcStrict == VINF_SUCCESS)
9088 return;
9089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9090 }
9091 }
9092 /* Otherwise unlock it. */
9093 else
9094 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9095
9096 /* Free the entry. */
9097 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9098 Assert(pVCpu->iem.s.cActiveMappings != 0);
9099 pVCpu->iem.s.cActiveMappings--;
9100}
9101
9102#endif /* IEM_WITH_SETJMP */
9103
9104#ifndef IN_RING3
9105/**
9106 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9107 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9108 *
9109 * Allows the instruction to be completed and retired, while the IEM user will
9110 * return to ring-3 immediately afterwards and do the postponed writes there.
9111 *
9112 * @returns VBox status code (no strict statuses). Caller must check
9113 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9115 * @param pvMem The mapping.
9116 * @param fAccess The kind of access.
9117 */
9118IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9119{
9120 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9121 AssertReturn(iMemMap >= 0, iMemMap);
9122
9123 /* If it's bounce buffered, we may need to write back the buffer. */
9124 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9125 {
9126 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9127 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9128 }
9129 /* Otherwise unlock it. */
9130 else
9131 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9132
9133 /* Free the entry. */
9134 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9135 Assert(pVCpu->iem.s.cActiveMappings != 0);
9136 pVCpu->iem.s.cActiveMappings--;
9137 return VINF_SUCCESS;
9138}
9139#endif
9140
9141
9142/**
9143 * Rolls back mappings, releasing page locks and such.
9144 *
9145 * The caller shall only call this after checking cActiveMappings.
9146 *
9148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9149 */
9150IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9151{
9152 Assert(pVCpu->iem.s.cActiveMappings > 0);
9153
9154 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9155 while (iMemMap-- > 0)
9156 {
9157 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9158 if (fAccess != IEM_ACCESS_INVALID)
9159 {
9160 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9161 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9162 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9163 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9164 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9165 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9166 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9167 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9168 pVCpu->iem.s.cActiveMappings--;
9169 }
9170 }
9171}
9172
9173
9174/**
9175 * Fetches a data byte.
9176 *
9177 * @returns Strict VBox status code.
9178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9179 * @param pu8Dst Where to return the byte.
9180 * @param iSegReg The index of the segment register to use for
9181 * this access. The base and limits are checked.
9182 * @param GCPtrMem The address of the guest memory.
9183 */
9184IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9185{
9186 /* The lazy approach for now... */
9187 uint8_t const *pu8Src;
9188 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9189 if (rc == VINF_SUCCESS)
9190 {
9191 *pu8Dst = *pu8Src;
9192 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9193 }
9194 return rc;
9195}
9196
9197
9198#ifdef IEM_WITH_SETJMP
9199/**
9200 * Fetches a data byte, longjmp on error.
9201 *
9202 * @returns The byte.
9203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9204 * @param iSegReg The index of the segment register to use for
9205 * this access. The base and limits are checked.
9206 * @param GCPtrMem The address of the guest memory.
9207 */
9208DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9209{
9210 /* The lazy approach for now... */
9211 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9212 uint8_t const bRet = *pu8Src;
9213 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9214 return bRet;
9215}
9216#endif /* IEM_WITH_SETJMP */
9217
9218
9219/**
9220 * Fetches a data word.
9221 *
9222 * @returns Strict VBox status code.
9223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9224 * @param pu16Dst Where to return the word.
9225 * @param iSegReg The index of the segment register to use for
9226 * this access. The base and limits are checked.
9227 * @param GCPtrMem The address of the guest memory.
9228 */
9229IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9230{
9231 /* The lazy approach for now... */
9232 uint16_t const *pu16Src;
9233 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9234 if (rc == VINF_SUCCESS)
9235 {
9236 *pu16Dst = *pu16Src;
9237 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9238 }
9239 return rc;
9240}
9241
9242
9243#ifdef IEM_WITH_SETJMP
9244/**
9245 * Fetches a data word, longjmp on error.
9246 *
9247 * @returns The word.
9248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9249 * @param iSegReg The index of the segment register to use for
9250 * this access. The base and limits are checked.
9251 * @param GCPtrMem The address of the guest memory.
9252 */
9253DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9254{
9255 /* The lazy approach for now... */
9256 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9257 uint16_t const u16Ret = *pu16Src;
9258 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9259 return u16Ret;
9260}
9261#endif
9262
9263
9264/**
9265 * Fetches a data dword.
9266 *
9267 * @returns Strict VBox status code.
9268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9269 * @param pu32Dst Where to return the dword.
9270 * @param iSegReg The index of the segment register to use for
9271 * this access. The base and limits are checked.
9272 * @param GCPtrMem The address of the guest memory.
9273 */
9274IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9275{
9276 /* The lazy approach for now... */
9277 uint32_t const *pu32Src;
9278 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9279 if (rc == VINF_SUCCESS)
9280 {
9281 *pu32Dst = *pu32Src;
9282 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9283 }
9284 return rc;
9285}
9286
9287
9288#ifdef IEM_WITH_SETJMP
9289
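/**
 * Applies segmentation to a read access, longjmp on error.
 *
 * In 64-bit mode only the FS and GS bases are added and the result is checked
 * for canonicality; in 16-bit and 32-bit mode the descriptor attributes and
 * limit are checked before the segment base is added.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   cbMem       The size of the access in bytes.
 * @param   GCPtrMem    The address of the guest memory.
 */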
9290IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9291{
9292 Assert(cbMem >= 1);
9293 Assert(iSegReg < X86_SREG_COUNT);
9294
9295 /*
9296 * 64-bit mode is simpler.
9297 */
9298 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9299 {
9300 if (iSegReg >= X86_SREG_FS)
9301 {
9302 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9303 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9304 GCPtrMem += pSel->u64Base;
9305 }
9306
9307 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9308 return GCPtrMem;
9309 }
9310 /*
9311 * 16-bit and 32-bit segmentation.
9312 */
9313 else
9314 {
9315 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9316 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9317 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9318 == X86DESCATTR_P /* data, expand up */
9319 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9320 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9321 {
9322 /* expand up */
9323 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9324 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9325 && GCPtrLast32 > (uint32_t)GCPtrMem))
9326 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9327 }
9328 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9329 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9330 {
9331 /* expand down */
9332 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9333 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9334 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9335 && GCPtrLast32 > (uint32_t)GCPtrMem))
9336 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else
9339 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9340 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9341 }
9342 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9343}
9344
9345
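/**
 * Applies segmentation to a write access, longjmp on error.
 *
 * In 64-bit mode only the FS and GS bases are added and the result is checked
 * for canonicality; in 16-bit and 32-bit mode the descriptor attributes and
 * limit are checked before the segment base is added.
 *
 * @returns The linear address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   cbMem       The size of the access in bytes.
 * @param   GCPtrMem    The address of the guest memory.
 */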
9346IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9347{
9348 Assert(cbMem >= 1);
9349 Assert(iSegReg < X86_SREG_COUNT);
9350
9351 /*
9352 * 64-bit mode is simpler.
9353 */
9354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9355 {
9356 if (iSegReg >= X86_SREG_FS)
9357 {
9358 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9359 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9360 GCPtrMem += pSel->u64Base;
9361 }
9362
9363 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9364 return GCPtrMem;
9365 }
9366 /*
9367 * 16-bit and 32-bit segmentation.
9368 */
9369 else
9370 {
9371 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9372 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9373 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9374 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9375 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9376 {
9377 /* expand up */
9378 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9379 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9380 && GCPtrLast32 > (uint32_t)GCPtrMem))
9381 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9382 }
9383 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9384 {
9385 /* expand down */
9386 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9387 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9388 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9389 && GCPtrLast32 > (uint32_t)GCPtrMem))
9390 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9391 }
9392 else
9393 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9394 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9395 }
9396 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9397}
9398
9399
9400/**
9401 * Fetches a data dword, longjmp on error, fallback/safe version.
9402 *
9403 * @returns The dword.
9404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9405 * @param iSegReg The index of the segment register to use for
9406 * this access. The base and limits are checked.
9407 * @param GCPtrMem The address of the guest memory.
9408 */
9409IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9410{
9411 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9412 uint32_t const u32Ret = *pu32Src;
9413 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9414 return u32Ret;
9415}
9416
9417
9418/**
9419 * Fetches a data dword, longjmp on error.
9420 *
9421 * @returns The dword.
9422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9423 * @param iSegReg The index of the segment register to use for
9424 * this access. The base and limits are checked.
9425 * @param GCPtrMem The address of the guest memory.
9426 */
9427DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9428{
9429# ifdef IEM_WITH_DATA_TLB
9430 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9431 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9432 {
9433 /// @todo more later.
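        /* Intended fast path: the dword does not cross a page boundary here,
           so it could be fetched directly via the data TLB.  Until that is
           implemented we simply fall through to the safe path below. */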
9434 }
9435
9436 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9437# else
9438 /* The lazy approach. */
9439 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9440 uint32_t const u32Ret = *pu32Src;
9441 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9442 return u32Ret;
9443# endif
9444}
9445#endif
9446
9447
9448#ifdef SOME_UNUSED_FUNCTION
9449/**
9450 * Fetches a data dword and sign extends it to a qword.
9451 *
9452 * @returns Strict VBox status code.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param pu64Dst Where to return the sign extended value.
9455 * @param iSegReg The index of the segment register to use for
9456 * this access. The base and limits are checked.
9457 * @param GCPtrMem The address of the guest memory.
9458 */
9459IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9460{
9461 /* The lazy approach for now... */
9462 int32_t const *pi32Src;
9463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9464 if (rc == VINF_SUCCESS)
9465 {
9466 *pu64Dst = *pi32Src;
9467 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9468 }
9469#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9470 else
9471 *pu64Dst = 0;
9472#endif
9473 return rc;
9474}
9475#endif
9476
9477
9478/**
9479 * Fetches a data qword.
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu64Dst Where to return the qword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 uint64_t const *pu64Src;
9492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9493 if (rc == VINF_SUCCESS)
9494 {
9495 *pu64Dst = *pu64Src;
9496 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9497 }
9498 return rc;
9499}
9500
9501
9502#ifdef IEM_WITH_SETJMP
9503/**
9504 * Fetches a data qword, longjmp on error.
9505 *
9506 * @returns The qword.
9507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9508 * @param iSegReg The index of the segment register to use for
9509 * this access. The base and limits are checked.
9510 * @param GCPtrMem The address of the guest memory.
9511 */
9512DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9513{
9514 /* The lazy approach for now... */
9515 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9516 uint64_t const u64Ret = *pu64Src;
9517 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9518 return u64Ret;
9519}
9520#endif
9521
9522
9523/**
9524 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9525 *
9526 * @returns Strict VBox status code.
9527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9528 * @param pu64Dst Where to return the qword.
9529 * @param iSegReg The index of the segment register to use for
9530 * this access. The base and limits are checked.
9531 * @param GCPtrMem The address of the guest memory.
9532 */
9533IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9534{
9535 /* The lazy approach for now... */
9536 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9537 if (RT_UNLIKELY(GCPtrMem & 15))
9538 return iemRaiseGeneralProtectionFault0(pVCpu);
9539
9540 uint64_t const *pu64Src;
9541 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9542 if (rc == VINF_SUCCESS)
9543 {
9544 *pu64Dst = *pu64Src;
9545 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9546 }
9547 return rc;
9548}
9549
9550
9551#ifdef IEM_WITH_SETJMP
9552/**
9553 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9554 *
9555 * @returns The qword.
9556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9557 * @param iSegReg The index of the segment register to use for
9558 * this access. The base and limits are checked.
9559 * @param GCPtrMem The address of the guest memory.
9560 */
9561DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9562{
9563 /* The lazy approach for now... */
9564 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9565 if (RT_LIKELY(!(GCPtrMem & 15)))
9566 {
9567 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9568 uint64_t const u64Ret = *pu64Src;
9569 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9570 return u64Ret;
9571 }
9572
9573 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9574 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9575}
9576#endif
9577
9578
9579/**
9580 * Fetches a data tword.
9581 *
9582 * @returns Strict VBox status code.
9583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9584 * @param pr80Dst Where to return the tword.
9585 * @param iSegReg The index of the segment register to use for
9586 * this access. The base and limits are checked.
9587 * @param GCPtrMem The address of the guest memory.
9588 */
9589IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9590{
9591 /* The lazy approach for now... */
9592 PCRTFLOAT80U pr80Src;
9593 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9594 if (rc == VINF_SUCCESS)
9595 {
9596 *pr80Dst = *pr80Src;
9597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9598 }
9599 return rc;
9600}
9601
9602
9603#ifdef IEM_WITH_SETJMP
9604/**
9605 * Fetches a data tword, longjmp on error.
9606 *
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pr80Dst Where to return the tword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 *pr80Dst = *pr80Src;
9618 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9619}
9620#endif
9621
9622
9623/**
9624 * Fetches a data dqword (double qword), generally SSE related.
9625 *
9626 * @returns Strict VBox status code.
9627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9628 * @param pu128Dst Where to return the dqword.
9629 * @param iSegReg The index of the segment register to use for
9630 * this access. The base and limits are checked.
9631 * @param GCPtrMem The address of the guest memory.
9632 */
9633IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9634{
9635 /* The lazy approach for now... */
9636 PCRTUINT128U pu128Src;
9637 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9638 if (rc == VINF_SUCCESS)
9639 {
9640 pu128Dst->au64[0] = pu128Src->au64[0];
9641 pu128Dst->au64[1] = pu128Src->au64[1];
9642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9643 }
9644 return rc;
9645}
9646
9647
9648#ifdef IEM_WITH_SETJMP
9649/**
9650 * Fetches a data dqword (double qword), generally SSE related.
9651 *
9652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9653 * @param pu128Dst Where to return the dqword.
9654 * @param iSegReg The index of the segment register to use for
9655 * this access. The base and limits are checked.
9656 * @param GCPtrMem The address of the guest memory.
9657 */
9658IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9659{
9660 /* The lazy approach for now... */
9661 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9662 pu128Dst->au64[0] = pu128Src->au64[0];
9663 pu128Dst->au64[1] = pu128Src->au64[1];
9664 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9665}
9666#endif
9667
9668
9669/**
9670 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9671 * related.
9672 *
9673 * Raises \#GP(0) if not aligned.
9674 *
9675 * @returns Strict VBox status code.
9676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9677 * @param pu128Dst Where to return the dqword.
9678 * @param iSegReg The index of the segment register to use for
9679 * this access. The base and limits are checked.
9680 * @param GCPtrMem The address of the guest memory.
9681 */
9682IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9683{
9684 /* The lazy approach for now... */
9685 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9686 if ( (GCPtrMem & 15)
9687 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9688 return iemRaiseGeneralProtectionFault0(pVCpu);
9689
9690 PCRTUINT128U pu128Src;
9691 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9692 if (rc == VINF_SUCCESS)
9693 {
9694 pu128Dst->au64[0] = pu128Src->au64[0];
9695 pu128Dst->au64[1] = pu128Src->au64[1];
9696 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9697 }
9698 return rc;
9699}
9700
9701
9702#ifdef IEM_WITH_SETJMP
9703/**
9704 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9705 * related, longjmp on error.
9706 *
9707 * Raises \#GP(0) if not aligned.
9708 *
9709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9710 * @param pu128Dst Where to return the dqword.
9711 * @param iSegReg The index of the segment register to use for
9712 * this access. The base and limits are checked.
9713 * @param GCPtrMem The address of the guest memory.
9714 */
9715DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9716{
9717 /* The lazy approach for now... */
9718 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9719 if ( (GCPtrMem & 15) == 0
9720 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9721 {
9722 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9723 pu128Dst->au64[0] = pu128Src->au64[0];
9724 pu128Dst->au64[1] = pu128Src->au64[1];
9725 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9726 return;
9727 }
9728
9729 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9730 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9731}
9732#endif
9733
9734
9735/**
9736 * Fetches a data oword (octo word), generally AVX related.
9737 *
9738 * @returns Strict VBox status code.
9739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9740 * @param pu256Dst Where to return the oword.
9741 * @param iSegReg The index of the segment register to use for
9742 * this access. The base and limits are checked.
9743 * @param GCPtrMem The address of the guest memory.
9744 */
9745IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9746{
9747 /* The lazy approach for now... */
9748 PCRTUINT256U pu256Src;
9749 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9750 if (rc == VINF_SUCCESS)
9751 {
9752 pu256Dst->au64[0] = pu256Src->au64[0];
9753 pu256Dst->au64[1] = pu256Src->au64[1];
9754 pu256Dst->au64[2] = pu256Src->au64[2];
9755 pu256Dst->au64[3] = pu256Src->au64[3];
9756 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9757 }
9758 return rc;
9759}
9760
9761
9762#ifdef IEM_WITH_SETJMP
9763/**
9764 * Fetches a data oword (octo word), generally AVX related.
9765 *
9766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9767 * @param pu256Dst Where to return the oword.
9768 * @param iSegReg The index of the segment register to use for
9769 * this access. The base and limits are checked.
9770 * @param GCPtrMem The address of the guest memory.
9771 */
9772IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9773{
9774 /* The lazy approach for now... */
9775 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9776 pu256Dst->au64[0] = pu256Src->au64[0];
9777 pu256Dst->au64[1] = pu256Src->au64[1];
9778 pu256Dst->au64[2] = pu256Src->au64[2];
9779 pu256Dst->au64[3] = pu256Src->au64[3];
9780 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9781}
9782#endif
9783
9784
9785/**
9786 * Fetches a data oword (octo word) at an aligned address, generally AVX
9787 * related.
9788 *
9789 * Raises \#GP(0) if not aligned.
9790 *
9791 * @returns Strict VBox status code.
9792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9793 * @param pu256Dst Where to return the oword.
9794 * @param iSegReg The index of the segment register to use for
9795 * this access. The base and limits are checked.
9796 * @param GCPtrMem The address of the guest memory.
9797 */
9798IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9799{
9800 /* The lazy approach for now... */
9801 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9802 if (GCPtrMem & 31)
9803 return iemRaiseGeneralProtectionFault0(pVCpu);
9804
9805 PCRTUINT256U pu256Src;
9806 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9807 if (rc == VINF_SUCCESS)
9808 {
9809 pu256Dst->au64[0] = pu256Src->au64[0];
9810 pu256Dst->au64[1] = pu256Src->au64[1];
9811 pu256Dst->au64[2] = pu256Src->au64[2];
9812 pu256Dst->au64[3] = pu256Src->au64[3];
9813 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9814 }
9815 return rc;
9816}
9817
9818
9819#ifdef IEM_WITH_SETJMP
9820/**
9821 * Fetches a data oword (octo word) at an aligned address, generally AVX
9822 * related, longjmp on error.
9823 *
9824 * Raises \#GP(0) if not aligned.
9825 *
9826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9827 * @param pu256Dst Where to return the oword.
9828 * @param iSegReg The index of the segment register to use for
9829 * this access. The base and limits are checked.
9830 * @param GCPtrMem The address of the guest memory.
9831 */
9832DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9833{
9834 /* The lazy approach for now... */
9835 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9836 if ((GCPtrMem & 31) == 0)
9837 {
9838 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9839 pu256Dst->au64[0] = pu256Src->au64[0];
9840 pu256Dst->au64[1] = pu256Src->au64[1];
9841 pu256Dst->au64[2] = pu256Src->au64[2];
9842 pu256Dst->au64[3] = pu256Src->au64[3];
9843 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9844 return;
9845 }
9846
9847 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9848 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9849}
9850#endif
9851
9852
9853
9854/**
9855 * Fetches a descriptor register (lgdt, lidt).
9856 *
9857 * @returns Strict VBox status code.
9858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9859 * @param pcbLimit Where to return the limit.
9860 * @param pGCPtrBase Where to return the base.
9861 * @param iSegReg The index of the segment register to use for
9862 * this access. The base and limits are checked.
9863 * @param GCPtrMem The address of the guest memory.
9864 * @param enmOpSize The effective operand size.
9865 */
9866IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9867 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9868{
9869 /*
9870 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9871 * little special:
9872 * - The two reads are done separately.
9873 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9874 * - We suspect the 386 to actually commit the limit before the base in
9875 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9876 *   don't try to emulate this eccentric behavior, because it's not well
9877 * enough understood and rather hard to trigger.
9878 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9879 */
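    /*
     * Memory layout of the pseudo-descriptor read below: a 16-bit limit at
     * offset 0 followed by the base at offset 2 - 8 bytes in 64-bit mode,
     * otherwise 4 bytes of which only the low 24 bits are used when the
     * operand size is 16-bit (the 486 oddly reads a dword limit).
     */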
9880 VBOXSTRICTRC rcStrict;
9881 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9882 {
9883 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9884 if (rcStrict == VINF_SUCCESS)
9885 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9886 }
9887 else
9888 {
9889 uint32_t uTmp = 0; /* (silence Visual C++ 'maybe used uninitialized' warning) */
9890 if (enmOpSize == IEMMODE_32BIT)
9891 {
9892 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9893 {
9894 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9895 if (rcStrict == VINF_SUCCESS)
9896 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9897 }
9898 else
9899 {
9900 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9901 if (rcStrict == VINF_SUCCESS)
9902 {
9903 *pcbLimit = (uint16_t)uTmp;
9904 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9905 }
9906 }
9907 if (rcStrict == VINF_SUCCESS)
9908 *pGCPtrBase = uTmp;
9909 }
9910 else
9911 {
9912 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9913 if (rcStrict == VINF_SUCCESS)
9914 {
9915 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9916 if (rcStrict == VINF_SUCCESS)
9917 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9918 }
9919 }
9920 }
9921 return rcStrict;
9922}
9923
9924
9925
9926/**
9927 * Stores a data byte.
9928 *
9929 * @returns Strict VBox status code.
9930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9931 * @param iSegReg The index of the segment register to use for
9932 * this access. The base and limits are checked.
9933 * @param GCPtrMem The address of the guest memory.
9934 * @param u8Value The value to store.
9935 */
9936IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9937{
9938 /* The lazy approach for now... */
9939 uint8_t *pu8Dst;
9940 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9941 if (rc == VINF_SUCCESS)
9942 {
9943 *pu8Dst = u8Value;
9944 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9945 }
9946 return rc;
9947}
9948
9949
9950#ifdef IEM_WITH_SETJMP
9951/**
9952 * Stores a data byte, longjmp on error.
9953 *
9954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9955 * @param iSegReg The index of the segment register to use for
9956 * this access. The base and limits are checked.
9957 * @param GCPtrMem The address of the guest memory.
9958 * @param u8Value The value to store.
9959 */
9960IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9961{
9962 /* The lazy approach for now... */
9963 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9964 *pu8Dst = u8Value;
9965 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9966}
9967#endif
9968
9969
9970/**
9971 * Stores a data word.
9972 *
9973 * @returns Strict VBox status code.
9974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9975 * @param iSegReg The index of the segment register to use for
9976 * this access. The base and limits are checked.
9977 * @param GCPtrMem The address of the guest memory.
9978 * @param u16Value The value to store.
9979 */
9980IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9981{
9982 /* The lazy approach for now... */
9983 uint16_t *pu16Dst;
9984 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9985 if (rc == VINF_SUCCESS)
9986 {
9987 *pu16Dst = u16Value;
9988 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9989 }
9990 return rc;
9991}
9992
9993
9994#ifdef IEM_WITH_SETJMP
9995/**
9996 * Stores a data word, longjmp on error.
9997 *
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param iSegReg The index of the segment register to use for
10000 * this access. The base and limits are checked.
10001 * @param GCPtrMem The address of the guest memory.
10002 * @param u16Value The value to store.
10003 */
10004IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10005{
10006 /* The lazy approach for now... */
10007 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10008 *pu16Dst = u16Value;
10009 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10010}
10011#endif
10012
10013
10014/**
10015 * Stores a data dword.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u32Value The value to store.
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10025{
10026 /* The lazy approach for now... */
10027 uint32_t *pu32Dst;
10028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 if (rc == VINF_SUCCESS)
10030 {
10031 *pu32Dst = u32Value;
10032 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10033 }
10034 return rc;
10035}
10036
10037
10038#ifdef IEM_WITH_SETJMP
10039/**
10040 * Stores a data dword, longjmp on error.
10041 *
10042 * @note    Does not return a status code; longjmps on error instead.
10043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10044 * @param iSegReg The index of the segment register to use for
10045 * this access. The base and limits are checked.
10046 * @param GCPtrMem The address of the guest memory.
10047 * @param u32Value The value to store.
10048 */
10049IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10050{
10051 /* The lazy approach for now... */
10052 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10053 *pu32Dst = u32Value;
10054 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10055}
10056#endif
10057
10058
10059/**
10060 * Stores a data qword.
10061 *
10062 * @returns Strict VBox status code.
10063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10064 * @param iSegReg The index of the segment register to use for
10065 * this access. The base and limits are checked.
10066 * @param GCPtrMem The address of the guest memory.
10067 * @param u64Value The value to store.
10068 */
10069IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10070{
10071 /* The lazy approach for now... */
10072 uint64_t *pu64Dst;
10073 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10074 if (rc == VINF_SUCCESS)
10075 {
10076 *pu64Dst = u64Value;
10077 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data qword, longjmp on error.
10086 *
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param u64Value The value to store.
10092 */
10093IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10094{
10095 /* The lazy approach for now... */
10096 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 *pu64Dst = u64Value;
10098 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10099}
10100#endif
10101
10102
10103/**
10104 * Stores a data dqword.
10105 *
10106 * @returns Strict VBox status code.
10107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10108 * @param iSegReg The index of the segment register to use for
10109 * this access. The base and limits are checked.
10110 * @param GCPtrMem The address of the guest memory.
10111 * @param u128Value The value to store.
10112 */
10113IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10114{
10115 /* The lazy approach for now... */
10116 PRTUINT128U pu128Dst;
10117 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10118 if (rc == VINF_SUCCESS)
10119 {
10120 pu128Dst->au64[0] = u128Value.au64[0];
10121 pu128Dst->au64[1] = u128Value.au64[1];
10122 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10123 }
10124 return rc;
10125}
10126
10127
10128#ifdef IEM_WITH_SETJMP
10129/**
10130 * Stores a data dqword, longjmp on error.
10131 *
10132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10133 * @param iSegReg The index of the segment register to use for
10134 * this access. The base and limits are checked.
10135 * @param GCPtrMem The address of the guest memory.
10136 * @param u128Value The value to store.
10137 */
10138IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10139{
10140 /* The lazy approach for now... */
10141 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10142 pu128Dst->au64[0] = u128Value.au64[0];
10143 pu128Dst->au64[1] = u128Value.au64[1];
10144 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10145}
10146#endif
10147
10148
10149/**
10150 * Stores a data dqword, SSE aligned.
10151 *
10152 * @returns Strict VBox status code.
10153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10154 * @param iSegReg The index of the segment register to use for
10155 * this access. The base and limits are checked.
10156 * @param GCPtrMem The address of the guest memory.
10157 * @param u128Value The value to store.
10158 */
10159IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10160{
10161 /* The lazy approach for now... */
10162 if ( (GCPtrMem & 15)
10163 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10164 return iemRaiseGeneralProtectionFault0(pVCpu);
10165
10166 PRTUINT128U pu128Dst;
10167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10168 if (rc == VINF_SUCCESS)
10169 {
10170 pu128Dst->au64[0] = u128Value.au64[0];
10171 pu128Dst->au64[1] = u128Value.au64[1];
10172 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10173 }
10174 return rc;
10175}
10176
10177
10178#ifdef IEM_WITH_SETJMP
10179/**
10180 * Stores a data dqword, SSE aligned, longjmp on error.
10181 *
10182 * @note    Does not return a status code; longjmps on error instead.
10183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10184 * @param iSegReg The index of the segment register to use for
10185 * this access. The base and limits are checked.
10186 * @param GCPtrMem The address of the guest memory.
10187 * @param u128Value The value to store.
10188 */
10189DECL_NO_INLINE(IEM_STATIC, void)
10190iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10191{
10192 /* The lazy approach for now... */
10193 if ( (GCPtrMem & 15) == 0
10194 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10195 {
10196 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10197 pu128Dst->au64[0] = u128Value.au64[0];
10198 pu128Dst->au64[1] = u128Value.au64[1];
10199 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10200 return;
10201 }
10202
10203 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10204 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10205}
10206#endif
10207
10208
10209/**
10210 * Stores a data oword (octo word).
10211 *
10212 * @returns Strict VBox status code.
10213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10214 * @param iSegReg The index of the segment register to use for
10215 * this access. The base and limits are checked.
10216 * @param GCPtrMem The address of the guest memory.
10217 * @param pu256Value Pointer to the value to store.
10218 */
10219IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10220{
10221 /* The lazy approach for now... */
10222 PRTUINT256U pu256Dst;
10223 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10224 if (rc == VINF_SUCCESS)
10225 {
10226 pu256Dst->au64[0] = pu256Value->au64[0];
10227 pu256Dst->au64[1] = pu256Value->au64[1];
10228 pu256Dst->au64[2] = pu256Value->au64[2];
10229 pu256Dst->au64[3] = pu256Value->au64[3];
10230 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10231 }
10232 return rc;
10233}
10234
10235
10236#ifdef IEM_WITH_SETJMP
10237/**
10238 * Stores a data oword (octo word), longjmp on error.
10239 *
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param iSegReg The index of the segment register to use for
10242 * this access. The base and limits are checked.
10243 * @param GCPtrMem The address of the guest memory.
10244 * @param pu256Value Pointer to the value to store.
10245 */
10246IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10247{
10248 /* The lazy approach for now... */
10249 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10250 pu256Dst->au64[0] = pu256Value->au64[0];
10251 pu256Dst->au64[1] = pu256Value->au64[1];
10252 pu256Dst->au64[2] = pu256Value->au64[2];
10253 pu256Dst->au64[3] = pu256Value->au64[3];
10254 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10255}
10256#endif
10257
10258
10259/**
10260 * Stores a data oword (octo word), AVX aligned.
10261 *
10262 * @returns Strict VBox status code.
10263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10264 * @param iSegReg The index of the segment register to use for
10265 * this access. The base and limits are checked.
10266 * @param GCPtrMem The address of the guest memory.
10267 * @param pu256Value Pointer to the value to store.
10268 */
10269IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10270{
10271 /* The lazy approach for now... */
10272 if (GCPtrMem & 31)
10273 return iemRaiseGeneralProtectionFault0(pVCpu);
10274
10275 PRTUINT256U pu256Dst;
10276 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10277 if (rc == VINF_SUCCESS)
10278 {
10279 pu256Dst->au64[0] = pu256Value->au64[0];
10280 pu256Dst->au64[1] = pu256Value->au64[1];
10281 pu256Dst->au64[2] = pu256Value->au64[2];
10282 pu256Dst->au64[3] = pu256Value->au64[3];
10283 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10284 }
10285 return rc;
10286}
10287
10288
10289#ifdef IEM_WITH_SETJMP
10290/**
10291 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10292 *
10293 * @note    Does not return a status code; longjmps on error instead.
10294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10295 * @param iSegReg The index of the segment register to use for
10296 * this access. The base and limits are checked.
10297 * @param GCPtrMem The address of the guest memory.
10298 * @param pu256Value Pointer to the value to store.
10299 */
10300DECL_NO_INLINE(IEM_STATIC, void)
10301iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10302{
10303 /* The lazy approach for now... */
10304 if ((GCPtrMem & 31) == 0)
10305 {
10306 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10307 pu256Dst->au64[0] = pu256Value->au64[0];
10308 pu256Dst->au64[1] = pu256Value->au64[1];
10309 pu256Dst->au64[2] = pu256Value->au64[2];
10310 pu256Dst->au64[3] = pu256Value->au64[3];
10311 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10312 return;
10313 }
10314
10315 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10316 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10317}
10318#endif
10319
10320
10321/**
10322 * Stores a descriptor register (sgdt, sidt).
10323 *
10324 * @returns Strict VBox status code.
10325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10326 * @param cbLimit The limit.
10327 * @param GCPtrBase The base address.
10328 * @param iSegReg The index of the segment register to use for
10329 * this access. The base and limits are checked.
10330 * @param GCPtrMem The address of the guest memory.
10331 */
10332IEM_STATIC VBOXSTRICTRC
10333iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10334{
10335 /*
10336 * The SIDT and SGDT instructions actually store the data using two
10337 * independent writes.  The instructions do not respond to operand size prefixes.
10338 */
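    /*
     * Stored layout: a 16-bit limit at offset 0, then the base at offset 2 -
     * 64-bit in long mode, 32-bit in 32-bit mode, and in 16-bit mode a 32-bit
     * value whose top byte is forced to 0xff on 286-class targets.
     */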
10339 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10340 if (rcStrict == VINF_SUCCESS)
10341 {
10342 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10343 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10344 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10345 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10346 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10347 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10348 else
10349 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10350 }
10351 return rcStrict;
10352}
10353
10354
10355/**
10356 * Pushes a word onto the stack.
10357 *
10358 * @returns Strict VBox status code.
10359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10360 * @param u16Value The value to push.
10361 */
10362IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10363{
10364 /* Decrement the stack pointer. */
10365 uint64_t uNewRsp;
10366 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10367
10368 /* Write the word the lazy way. */
10369 uint16_t *pu16Dst;
10370 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10371 if (rc == VINF_SUCCESS)
10372 {
10373 *pu16Dst = u16Value;
10374 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10375 }
10376
10377 /* Commit the new RSP value unless an access handler made trouble. */
10378 if (rc == VINF_SUCCESS)
10379 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10380
10381 return rc;
10382}
10383
10384
10385/**
10386 * Pushes a dword onto the stack.
10387 *
10388 * @returns Strict VBox status code.
10389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10390 * @param u32Value The value to push.
10391 */
10392IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10393{
10394 /* Decrement the stack pointer. */
10395 uint64_t uNewRsp;
10396 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10397
10398 /* Write the dword the lazy way. */
10399 uint32_t *pu32Dst;
10400 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10401 if (rc == VINF_SUCCESS)
10402 {
10403 *pu32Dst = u32Value;
10404 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10405 }
10406
10407 /* Commit the new RSP value unless an access handler made trouble. */
10408 if (rc == VINF_SUCCESS)
10409 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10410
10411 return rc;
10412}
10413
10414
10415/**
10416 * Pushes a dword segment register value onto the stack.
10417 *
10418 * @returns Strict VBox status code.
10419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10420 * @param u32Value The value to push.
10421 */
10422IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10423{
10424 /* Decrement the stack pointer. */
10425 uint64_t uNewRsp;
10426 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10427
10428 /* The Intel docs talk about zero extending the selector register
10429    value.  My actual Intel CPU here might be zero extending the value,
10430    but it still only writes the lower word... */
10431 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10432 *   happens when crossing an electric page boundary: is the high word checked
10433 * for write accessibility or not? Probably it is. What about segment limits?
10434 * It appears this behavior is also shared with trap error codes.
10435 *
10436 *   Docs indicate the behavior may have changed with the Pentium or Pentium Pro.  Check
10437 *   on ancient hardware to find out when it actually changed. */
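    /* Consequently only the low word is written below, although a full dword
       stack slot is mapped; mapping it read-write leaves the high word as it
       was in guest memory. */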
10438 uint16_t *pu16Dst;
10439 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10440 if (rc == VINF_SUCCESS)
10441 {
10442 *pu16Dst = (uint16_t)u32Value;
10443 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10444 }
10445
10446 /* Commit the new RSP value unless an access handler made trouble. */
10447 if (rc == VINF_SUCCESS)
10448 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10449
10450 return rc;
10451}
10452
10453
10454/**
10455 * Pushes a qword onto the stack.
10456 *
10457 * @returns Strict VBox status code.
10458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10459 * @param u64Value The value to push.
10460 */
10461IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10462{
10463 /* Decrement the stack pointer. */
10464 uint64_t uNewRsp;
10465 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10466
10467 /* Write the qword the lazy way. */
10468 uint64_t *pu64Dst;
10469 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10470 if (rc == VINF_SUCCESS)
10471 {
10472 *pu64Dst = u64Value;
10473 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10474 }
10475
10476 /* Commit the new RSP value unless an access handler made trouble. */
10477 if (rc == VINF_SUCCESS)
10478 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10479
10480 return rc;
10481}
10482
10483
10484/**
10485 * Pops a word from the stack.
10486 *
10487 * @returns Strict VBox status code.
10488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10489 * @param pu16Value Where to store the popped value.
10490 */
10491IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10492{
10493 /* Increment the stack pointer. */
10494 uint64_t uNewRsp;
10495 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10496
10497 /* Read the word the lazy way. */
10498 uint16_t const *pu16Src;
10499 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10500 if (rc == VINF_SUCCESS)
10501 {
10502 *pu16Value = *pu16Src;
10503 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10504
10505 /* Commit the new RSP value. */
10506 if (rc == VINF_SUCCESS)
10507 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10508 }
10509
10510 return rc;
10511}
10512
10513
10514/**
10515 * Pops a dword from the stack.
10516 *
10517 * @returns Strict VBox status code.
10518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10519 * @param pu32Value Where to store the popped value.
10520 */
10521IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10522{
10523 /* Increment the stack pointer. */
10524 uint64_t uNewRsp;
10525 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10526
10527 /* Read the dword the lazy way. */
10528 uint32_t const *pu32Src;
10529 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10530 if (rc == VINF_SUCCESS)
10531 {
10532 *pu32Value = *pu32Src;
10533 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10534
10535 /* Commit the new RSP value. */
10536 if (rc == VINF_SUCCESS)
10537 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10538 }
10539
10540 return rc;
10541}
10542
10543
10544/**
10545 * Pops a qword from the stack.
10546 *
10547 * @returns Strict VBox status code.
10548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10549 * @param pu64Value Where to store the popped value.
10550 */
10551IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10552{
10553 /* Increment the stack pointer. */
10554 uint64_t uNewRsp;
10555 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10556
10557 /* Read the qword the lazy way. */
10558 uint64_t const *pu64Src;
10559 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10560 if (rc == VINF_SUCCESS)
10561 {
10562 *pu64Value = *pu64Src;
10563 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10564
10565 /* Commit the new RSP value. */
10566 if (rc == VINF_SUCCESS)
10567 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10568 }
10569
10570 return rc;
10571}
10572
10573
10574/**
10575 * Pushes a word onto the stack, using a temporary stack pointer.
10576 *
10577 * @returns Strict VBox status code.
10578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10579 * @param u16Value The value to push.
10580 * @param pTmpRsp Pointer to the temporary stack pointer.
10581 */
10582IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10583{
10584 /* Decrement the stack pointer. */
10585 RTUINT64U NewRsp = *pTmpRsp;
10586 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10587
10588 /* Write the word the lazy way. */
10589 uint16_t *pu16Dst;
10590 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10591 if (rc == VINF_SUCCESS)
10592 {
10593 *pu16Dst = u16Value;
10594 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10595 }
10596
10597 /* Commit the new RSP value unless an access handler made trouble. */
10598 if (rc == VINF_SUCCESS)
10599 *pTmpRsp = NewRsp;
10600
10601 return rc;
10602}
10603
10604
10605/**
10606 * Pushes a dword onto the stack, using a temporary stack pointer.
10607 *
10608 * @returns Strict VBox status code.
10609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10610 * @param u32Value The value to push.
10611 * @param pTmpRsp Pointer to the temporary stack pointer.
10612 */
10613IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10614{
10615 /* Decrement the stack pointer. */
10616 RTUINT64U NewRsp = *pTmpRsp;
10617 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10618
10619 /* Write the dword the lazy way. */
10620 uint32_t *pu32Dst;
10621 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10622 if (rc == VINF_SUCCESS)
10623 {
10624 *pu32Dst = u32Value;
10625 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10626 }
10627
10628 /* Commit the new RSP value unless an access handler made trouble. */
10629 if (rc == VINF_SUCCESS)
10630 *pTmpRsp = NewRsp;
10631
10632 return rc;
10633}
10634
10635
10636/**
10637 * Pushes a qword onto the stack, using a temporary stack pointer.
10638 *
10639 * @returns Strict VBox status code.
10640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10641 * @param u64Value The value to push.
10642 * @param pTmpRsp Pointer to the temporary stack pointer.
10643 */
10644IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10645{
10646 /* Decrement the stack pointer. */
10647 RTUINT64U NewRsp = *pTmpRsp;
10648 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10649
10650 /* Write the qword the lazy way. */
10651 uint64_t *pu64Dst;
10652 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10653 if (rc == VINF_SUCCESS)
10654 {
10655 *pu64Dst = u64Value;
10656 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10657 }
10658
10659 /* Commit the new RSP value unless an access handler made trouble. */
10660 if (rc == VINF_SUCCESS)
10661 *pTmpRsp = NewRsp;
10662
10663 return rc;
10664}
10665
10666
10667/**
10668 * Pops a word from the stack, using a temporary stack pointer.
10669 *
10670 * @returns Strict VBox status code.
10671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10672 * @param pu16Value Where to store the popped value.
10673 * @param pTmpRsp Pointer to the temporary stack pointer.
10674 */
10675IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10676{
10677 /* Increment the stack pointer. */
10678 RTUINT64U NewRsp = *pTmpRsp;
10679 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10680
10681 /* Read the word the lazy way. */
10682 uint16_t const *pu16Src;
10683 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10684 if (rc == VINF_SUCCESS)
10685 {
10686 *pu16Value = *pu16Src;
10687 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10688
10689 /* Commit the new RSP value. */
10690 if (rc == VINF_SUCCESS)
10691 *pTmpRsp = NewRsp;
10692 }
10693
10694 return rc;
10695}
10696
10697
10698/**
10699 * Pops a dword from the stack, using a temporary stack pointer.
10700 *
10701 * @returns Strict VBox status code.
10702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10703 * @param pu32Value Where to store the popped value.
10704 * @param pTmpRsp Pointer to the temporary stack pointer.
10705 */
10706IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10707{
10708 /* Increment the stack pointer. */
10709 RTUINT64U NewRsp = *pTmpRsp;
10710 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10711
10712 /* Read the dword the lazy way. */
10713 uint32_t const *pu32Src;
10714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10715 if (rc == VINF_SUCCESS)
10716 {
10717 *pu32Value = *pu32Src;
10718 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10719
10720 /* Commit the new RSP value. */
10721 if (rc == VINF_SUCCESS)
10722 *pTmpRsp = NewRsp;
10723 }
10724
10725 return rc;
10726}
10727
10728
10729/**
10730 * Pops a qword from the stack, using a temporary stack pointer.
10731 *
10732 * @returns Strict VBox status code.
10733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10734 * @param pu64Value Where to store the popped value.
10735 * @param pTmpRsp Pointer to the temporary stack pointer.
10736 */
10737IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10738{
10739 /* Increment the stack pointer. */
10740 RTUINT64U NewRsp = *pTmpRsp;
10741 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10742
10743 /* Read the qword the lazy way. */
10744 uint64_t const *pu64Src;
10745 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10746 if (rcStrict == VINF_SUCCESS)
10747 {
10748 *pu64Value = *pu64Src;
10749 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10750
10751 /* Commit the new RSP value. */
10752 if (rcStrict == VINF_SUCCESS)
10753 *pTmpRsp = NewRsp;
10754 }
10755
10756 return rcStrict;
10757}
10758
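/*
 * Usage sketch (illustrative only, not lifted from the instruction
 * implementations): the *Ex push/pop helpers let a caller perform several
 * stack accesses against a temporary RSP copy and commit the final value to
 * the guest context only once everything has succeeded.
 *
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      uint64_t     uValue1, uValue2;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &uValue1, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pVCpu, &uValue2, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
 *      return rcStrict;
 */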
10759
10760/**
10761 * Begin a special stack push (used by interrupts, exceptions and such).
10762 *
10763 * This will raise \#SS or \#PF if appropriate.
10764 *
10765 * @returns Strict VBox status code.
10766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10767 * @param cbMem The number of bytes to push onto the stack.
10768 * @param ppvMem Where to return the pointer to the stack memory.
10769 * As with the other memory functions this could be
10770 * direct access or bounce buffered access, so
10771 * don't commit register changes until the commit call
10772 * succeeds.
10773 * @param puNewRsp Where to return the new RSP value. This must be
10774 * passed unchanged to
10775 * iemMemStackPushCommitSpecial().
10776 */
10777IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10778{
10779 Assert(cbMem < UINT8_MAX);
10780 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10781 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10782}
10783
10784
10785/**
10786 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10787 *
10788 * This will update the rSP.
10789 *
10790 * @returns Strict VBox status code.
10791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10792 * @param pvMem The pointer returned by
10793 * iemMemStackPushBeginSpecial().
10794 * @param uNewRsp The new RSP value returned by
10795 * iemMemStackPushBeginSpecial().
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10798{
10799 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10800 if (rcStrict == VINF_SUCCESS)
10801 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10802 return rcStrict;
10803}
10804
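/*
 * Usage sketch (loosely modelled on the real mode interrupt delivery code in
 * this file; fEfl, uOldCs and uOldPc are hypothetical locals): a special push
 * maps the stack area first, fills it in, and only then commits both the
 * memory and the new RSP.
 *
 *      uint16_t    *pu16Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[2] = (uint16_t)fEfl;
 *      pu16Frame[1] = (uint16_t)uOldCs;
 *      pu16Frame[0] = (uint16_t)uOldPc;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
 */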
10805
10806/**
10807 * Begin a special stack pop (used by iret, retf and such).
10808 *
10809 * This will raise \#SS or \#PF if appropriate.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param cbMem The number of bytes to pop from the stack.
10814 * @param ppvMem Where to return the pointer to the stack memory.
10815 * @param puNewRsp Where to return the new RSP value. This must be
10816 * assigned to CPUMCTX::rsp manually some time
10817 * after iemMemStackPopDoneSpecial() has been
10818 * called.
10819 */
10820IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10821{
10822 Assert(cbMem < UINT8_MAX);
10823 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10824 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10825}
10826
10827
10828/**
10829 * Continue a special stack pop (used by iret and retf).
10830 *
10831 * This will raise \#SS or \#PF if appropriate.
10832 *
10833 * @returns Strict VBox status code.
10834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10835 * @param cbMem The number of bytes to pop from the stack.
10836 * @param ppvMem Where to return the pointer to the stack memory.
10837 * @param puNewRsp Where to return the new RSP value. This must be
10838 * assigned to CPUMCTX::rsp manually some time
10839 * after iemMemStackPopDoneSpecial() has been
10840 * called.
10841 */
10842IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10843{
10844 Assert(cbMem < UINT8_MAX);
10845 RTUINT64U NewRsp;
10846 NewRsp.u = *puNewRsp;
10847 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10848 *puNewRsp = NewRsp.u;
10849 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10850}
10851
10852
10853/**
10854 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10855 * iemMemStackPopContinueSpecial).
10856 *
10857 * The caller will manually commit the rSP.
10858 *
10859 * @returns Strict VBox status code.
10860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10861 * @param pvMem The pointer returned by
10862 * iemMemStackPopBeginSpecial() or
10863 * iemMemStackPopContinueSpecial().
10864 */
10865IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10866{
10867 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10868}
10869
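/*
 * Usage sketch (illustrative, loosely following the iret/retf implementations
 * in IEMAllCImpl.cpp.h): the RSP value produced by the begin routine is only
 * written to the guest context after the popped values have been validated,
 * which is why iemMemStackPopDoneSpecial() leaves that commit to the caller.
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uNewEip   = pu32Frame[0];
 *      uint16_t const uNewCs    = (uint16_t)pu32Frame[1];
 *      uint32_t const uNewFlags = pu32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      (validate uNewCs, uNewEip and uNewFlags, then finally:)
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */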
10870
10871/**
10872 * Fetches a system table byte.
10873 *
10874 * @returns Strict VBox status code.
10875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10876 * @param pbDst Where to return the byte.
10877 * @param iSegReg The index of the segment register to use for
10878 * this access. The base and limits are checked.
10879 * @param GCPtrMem The address of the guest memory.
10880 */
10881IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10882{
10883 /* The lazy approach for now... */
10884 uint8_t const *pbSrc;
10885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10886 if (rc == VINF_SUCCESS)
10887 {
10888 *pbDst = *pbSrc;
10889 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10890 }
10891 return rc;
10892}
10893
10894
10895/**
10896 * Fetches a system table word.
10897 *
10898 * @returns Strict VBox status code.
10899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10900 * @param pu16Dst Where to return the word.
10901 * @param iSegReg The index of the segment register to use for
10902 * this access. The base and limits are checked.
10903 * @param GCPtrMem The address of the guest memory.
10904 */
10905IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10906{
10907 /* The lazy approach for now... */
10908 uint16_t const *pu16Src;
10909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10910 if (rc == VINF_SUCCESS)
10911 {
10912 *pu16Dst = *pu16Src;
10913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10914 }
10915 return rc;
10916}
10917
10918
10919/**
10920 * Fetches a system table dword.
10921 *
10922 * @returns Strict VBox status code.
10923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10924 * @param pu32Dst Where to return the dword.
10925 * @param iSegReg The index of the segment register to use for
10926 * this access. The base and limits are checked.
10927 * @param GCPtrMem The address of the guest memory.
10928 */
10929IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10930{
10931 /* The lazy approach for now... */
10932 uint32_t const *pu32Src;
10933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10934 if (rc == VINF_SUCCESS)
10935 {
10936 *pu32Dst = *pu32Src;
10937 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10938 }
10939 return rc;
10940}
10941
10942
10943/**
10944 * Fetches a system table qword.
10945 *
10946 * @returns Strict VBox status code.
10947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10948 * @param pu64Dst Where to return the qword.
10949 * @param iSegReg The index of the segment register to use for
10950 * this access. The base and limits are checked.
10951 * @param GCPtrMem The address of the guest memory.
10952 */
10953IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10954{
10955 /* The lazy approach for now... */
10956 uint64_t const *pu64Src;
10957 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10958 if (rc == VINF_SUCCESS)
10959 {
10960 *pu64Dst = *pu64Src;
10961 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10962 }
10963 return rc;
10964}
10965
10966
10967/**
10968 * Fetches a descriptor table entry with caller specified error code.
10969 *
10970 * @returns Strict VBox status code.
10971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10972 * @param pDesc Where to return the descriptor table entry.
10973 * @param uSel The selector which table entry to fetch.
10974 * @param uXcpt The exception to raise on table lookup error.
10975 * @param uErrorCode The error code associated with the exception.
10976 */
10977IEM_STATIC VBOXSTRICTRC
10978iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10979{
10980 AssertPtr(pDesc);
10981 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10982
10983 /** @todo did the 286 require all 8 bytes to be accessible? */
10984 /*
10985 * Get the selector table base and check bounds.
10986 */
10987 RTGCPTR GCPtrBase;
10988 if (uSel & X86_SEL_LDT)
10989 {
10990 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10991 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10992 {
10993 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10994 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10995 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10996 uErrorCode, 0);
10997 }
10998
10999 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
11000 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
11001 }
11002 else
11003 {
11004 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11005 {
11006 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11007 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11008 uErrorCode, 0);
11009 }
11010 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11011 }
11012
11013 /*
11014 * Read the legacy descriptor and maybe the long mode extensions if
11015 * required.
11016 */
11017 VBOXSTRICTRC rcStrict;
11018 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11019 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11020 else
11021 {
11022 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11023 if (rcStrict == VINF_SUCCESS)
11024 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11025 if (rcStrict == VINF_SUCCESS)
11026 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11027 if (rcStrict == VINF_SUCCESS)
11028 pDesc->Legacy.au16[3] = 0;
11029 else
11030 return rcStrict;
11031 }
11032
11033 if (rcStrict == VINF_SUCCESS)
11034 {
11035 if ( !IEM_IS_LONG_MODE(pVCpu)
11036 || pDesc->Legacy.Gen.u1DescType)
11037 pDesc->Long.au64[1] = 0;
11038 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11039 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11040 else
11041 {
11042 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11043 /** @todo is this the right exception? */
11044 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11045 }
11046 }
11047 return rcStrict;
11048}
11049
11050
11051/**
11052 * Fetches a descriptor table entry.
11053 *
11054 * @returns Strict VBox status code.
11055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11056 * @param pDesc Where to return the descriptor table entry.
11057 * @param uSel The selector which table entry to fetch.
11058 * @param uXcpt The exception to raise on table lookup error.
11059 */
11060IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11061{
11062 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11063}
11064
11065
11066/**
11067 * Fakes a long mode stack selector for SS = 0.
11068 *
11069 * @param pDescSs Where to return the fake stack descriptor.
11070 * @param uDpl The DPL we want.
11071 */
11072IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11073{
11074 pDescSs->Long.au64[0] = 0;
11075 pDescSs->Long.au64[1] = 0;
11076 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11077 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11078 pDescSs->Long.Gen.u2Dpl = uDpl;
11079 pDescSs->Long.Gen.u1Present = 1;
11080 pDescSs->Long.Gen.u1Long = 1;
11081}
11082
11083
11084/**
11085 * Marks the selector descriptor as accessed (only non-system descriptors).
11086 *
11087 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11088 * will therefore skip the limit checks.
11089 *
11090 * @returns Strict VBox status code.
11091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11092 * @param uSel The selector.
11093 */
11094IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11095{
11096 /*
11097 * Get the selector table base and calculate the entry address.
11098 */
11099 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11100 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11101 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11102 GCPtr += uSel & X86_SEL_MASK;
11103
11104 /*
11105 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11106 * ugly stuff to avoid this. This will make sure it's an atomic access
11107 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11108 */
11109 VBOXSTRICTRC rcStrict;
11110 uint32_t volatile *pu32;
11111 if ((GCPtr & 3) == 0)
11112 {
11113 /* The normal case, map the 32 bits around the accessed bit (40). */
11114 GCPtr += 2 + 2;
11115 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11116 if (rcStrict != VINF_SUCCESS)
11117 return rcStrict;
11118 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11119 }
11120 else
11121 {
11122 /* The misaligned GDT/LDT case, map the whole thing. */
11123 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11124 if (rcStrict != VINF_SUCCESS)
11125 return rcStrict;
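        /* The accessed flag is bit 40 of the descriptor, i.e. bit 0 of byte 5.
           Pick a dword aligned base near the mapping and adjust the bit index
           so that ASMAtomicBitSet always gets an aligned address to work on. */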
11126 switch ((uintptr_t)pu32 & 3)
11127 {
11128 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11129 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11130 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11131 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11132 }
11133 }
11134
11135 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11136}
11137
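/*
 * Usage sketch (an assumption about a typical caller, not copied from
 * IEMAllCImpl.cpp.h): segment loading code fetches the descriptor, validates
 * it, and only then sets the accessed bit via the helper above.
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      (presence, type and DPL checks go here)
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *      }
 */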
11138/** @} */
11139
11140
11141/*
11142 * Include the C/C++ implementation of the instructions.
11143 */
11144#include "IEMAllCImpl.cpp.h"
11145
11146
11147
11148/** @name "Microcode" macros.
11149 *
11150 * The idea is that we should be able to use the same code to interpret
11151 * instructions as well as recompiled instructions. Thus this obfuscation.
11152 *
11153 * @{
11154 */
11155#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11156#define IEM_MC_END() }
11157#define IEM_MC_PAUSE() do {} while (0)
11158#define IEM_MC_CONTINUE() do {} while (0)
11159
11160/** Internal macro. */
11161#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11162 do \
11163 { \
11164 VBOXSTRICTRC rcStrict2 = a_Expr; \
11165 if (rcStrict2 != VINF_SUCCESS) \
11166 return rcStrict2; \
11167 } while (0)
11168
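/*
 * Illustrative sketch (hypothetical, not one of the real opcode workers): an
 * instruction body is written purely in terms of the IEM_MC_* macros, which
 * currently expand to plain C statements but are meant to be retargetable at
 * a recompiler later.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */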
11169
11170#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11171#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11172#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11173#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11174#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11175#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11176#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11177#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11178#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11179 do { \
11180 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11181 return iemRaiseDeviceNotAvailable(pVCpu); \
11182 } while (0)
11183#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11184 do { \
11185 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11186 return iemRaiseDeviceNotAvailable(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11189 do { \
11190 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11191 return iemRaiseMathFault(pVCpu); \
11192 } while (0)
11193#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11194 do { \
11195 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11196 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11197 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11198 return iemRaiseUndefinedOpcode(pVCpu); \
11199 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11200 return iemRaiseDeviceNotAvailable(pVCpu); \
11201 } while (0)
11202#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11203 do { \
11204 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11205 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11206 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11207 return iemRaiseUndefinedOpcode(pVCpu); \
11208 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11209 return iemRaiseDeviceNotAvailable(pVCpu); \
11210 } while (0)
11211#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11212 do { \
11213 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11214 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11215 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11216 return iemRaiseUndefinedOpcode(pVCpu); \
11217 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11218 return iemRaiseDeviceNotAvailable(pVCpu); \
11219 } while (0)
11220#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11221 do { \
11222 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11223 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11224 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11225 return iemRaiseUndefinedOpcode(pVCpu); \
11226 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11227 return iemRaiseDeviceNotAvailable(pVCpu); \
11228 } while (0)
11229#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11230 do { \
11231 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11232 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11233 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11234 return iemRaiseUndefinedOpcode(pVCpu); \
11235 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11236 return iemRaiseDeviceNotAvailable(pVCpu); \
11237 } while (0)
11238#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11239 do { \
11240 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11241 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11242 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11243 return iemRaiseUndefinedOpcode(pVCpu); \
11244 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11245 return iemRaiseDeviceNotAvailable(pVCpu); \
11246 } while (0)
11247#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11248 do { \
11249 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11250 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11251 return iemRaiseUndefinedOpcode(pVCpu); \
11252 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11253 return iemRaiseDeviceNotAvailable(pVCpu); \
11254 } while (0)
11255#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11256 do { \
11257 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11258 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11259 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11260 return iemRaiseUndefinedOpcode(pVCpu); \
11261 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11262 return iemRaiseDeviceNotAvailable(pVCpu); \
11263 } while (0)
11264#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11265 do { \
11266 if (pVCpu->iem.s.uCpl != 0) \
11267 return iemRaiseGeneralProtectionFault0(pVCpu); \
11268 } while (0)
11269#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11270 do { \
11271 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11272 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11273 } while (0)
11274#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11275 do { \
11276 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11277 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11278 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11279 return iemRaiseUndefinedOpcode(pVCpu); \
11280 } while (0)
11281#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11282 do { \
11283 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11284 return iemRaiseGeneralProtectionFault0(pVCpu); \
11285 } while (0)
11286
11287
11288#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11289#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11290#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11291#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11292#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11293#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11294#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11295 uint32_t a_Name; \
11296 uint32_t *a_pName = &a_Name
11297#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11298 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11299
11300#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11301#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11302
11303#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11320#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11321 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11322 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11323 } while (0)
11324#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11325 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11326 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11327 } while (0)
11328#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11329 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11330 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11331 } while (0)
11332/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11333#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11334 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11335 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11336 } while (0)
11337#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11338 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11339 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11340 } while (0)
11341/** @note Not for IOPL or IF testing or modification. */
11342#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11343#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11344#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11345#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11346
11347#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11348#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11349#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11350#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11351#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11352#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11353#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11354#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11355#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11356#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11357/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11358#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11359 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11360 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11361 } while (0)
11362#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11363 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11364 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11365 } while (0)
11366#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11367 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11368
11369
11370#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11371#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11372/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11373 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11374#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11375#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11376/** @note Not for IOPL or IF testing or modification. */
11377#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11378
11379#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11380#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11381#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11382 do { \
11383 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11384 *pu32Reg += (a_u32Value); \
11385 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11386 } while (0)
11387#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11388
11389#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11390#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11391#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11392 do { \
11393 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11394 *pu32Reg -= (a_u32Value); \
11395 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11396 } while (0)
11397#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11398#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11399
11400#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11401#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11402#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11403#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11404#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11405#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11406#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11407
11408#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11409#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11410#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11411#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11412
11413#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11414#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11415#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11416
11417#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11418#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11419#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11420
11421#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11422#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11423#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11424
11425#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11426#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11427#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11428
11429#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11430
11431#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11432
11433#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11434#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11435#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11436 do { \
11437 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11438 *pu32Reg &= (a_u32Value); \
11439 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11440 } while (0)
11441#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11442
11443#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11444#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11445#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11446 do { \
11447 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11448 *pu32Reg |= (a_u32Value); \
11449 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11450 } while (0)
11451#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11452
11453
11454/** @note Not for IOPL or IF modification. */
11455#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11456/** @note Not for IOPL or IF modification. */
11457#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11458/** @note Not for IOPL or IF modification. */
11459#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11460
11461#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11462
11463/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11464#define IEM_MC_FPU_TO_MMX_MODE() do { \
11465 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11466 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11467 } while (0)
11468
11469/** Switches the FPU state from MMX mode (FTW=0xffff). */
11470#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11471 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11472 } while (0)
11473
11474#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11475 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11476#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11477 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11478#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11479 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11480 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11481 } while (0)
11482#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11483 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11484 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11485 } while (0)
11486#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11487 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11488#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11489 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11490#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11491 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11492
11493#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11494 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11495 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11496 } while (0)
11497#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11498 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11499#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11500 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11501#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11502 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11503#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11504 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11505 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11506 } while (0)
11507#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11508 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11509#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11510 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11511 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11512 } while (0)
11513#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11514 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11515#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11516 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11517 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11518 } while (0)
11519#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11520 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11521#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11522 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11523#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11524 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11525#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11526 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11527#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11528 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11529 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11530 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11531 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11532 } while (0)
11533
11534#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11535 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11536 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11537 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11538 } while (0)
11539#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11540 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11541 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11542 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11543 } while (0)
11544#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11545 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11546 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11547 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11548 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11549 } while (0)
11550#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11551 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11552 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11553 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11554 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11555 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11556 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11557 } while (0)
11558
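/* The _ZX_VLMAX stores below implement the VEX zero extension rule: a 128-bit
   (or narrower) VEX encoded write clears the destination register all the way
   up to the maximum supported vector length, hence the explicit zeroing of the
   YmmHi halves and the (for now empty) ZMM-and-up clearing hook. */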
11559#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11560#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11561 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11562 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11568 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11569 } while (0)
11570#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11571 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11572 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11577 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11578 } while (0)
11579#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11580 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11581 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11586 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11587 } while (0)
11588#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11589 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11590 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11591 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11592 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11593 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11594 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11595 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11596 } while (0)
11597
11598#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11599 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11600#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11601 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11602#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11603 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11604#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11605 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11606 uintptr_t const iYRegTmp = (a_iYReg); \
11607 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11609 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11610 } while (0)
11611
11612#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11613 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11614 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11615 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11618 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11620 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11621 } while (0)
11622#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11623 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11624 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11625 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11626 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11628 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11630 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11631 } while (0)
11632#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11633 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11634 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11635 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11636 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11637 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11638 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11639 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11640 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11641 } while (0)
11642
11643#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11644 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11645 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11646 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11647 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11648 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11649 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11650 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11651 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11652 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11653 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11654 } while (0)
11655#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11656 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11657 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11658 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11659 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11660 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11661 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11662 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11663 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11664 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11665 } while (0)
11666#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11667 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11668 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11669 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11670 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11671 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11672 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11673 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11674 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11675 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11676 } while (0)
11677#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11678 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11679 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11680 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11681 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11682 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11683 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11684 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11685 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11686 } while (0)
11687
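/* The memory fetch macros below come in two flavours: without IEM_WITH_SETJMP
   the iemMemFetchData*() helpers return a strict status code which gets
   propagated via IEM_MC_RETURN_ON_FAILURE(), whereas the *Jmp variants report
   failures by longjmp'ing out of the instruction, so the macros reduce to
   plain assignments. */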
11688#ifndef IEM_WITH_SETJMP
11689# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11691# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11693# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11695#else
11696# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11697 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11699 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11700# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11701 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11702#endif
11703
11704#ifndef IEM_WITH_SETJMP
11705# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11707# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11709# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11711#else
11712# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11715 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11716# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11717 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11718#endif
11719
11720#ifndef IEM_WITH_SETJMP
11721# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11723# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11725# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11727#else
11728# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11729 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11730# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11731 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11732# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734#endif
11735
11736#ifdef SOME_UNUSED_FUNCTION
11737# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11739#endif
11740
11741#ifndef IEM_WITH_SETJMP
11742# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11746# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11750#else
11751# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11752 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11753# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11754 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11755# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759#endif
11760
11761#ifndef IEM_WITH_SETJMP
11762# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11766# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11767 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11768#else
11769# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11770 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11771# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11772 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11773# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11774 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11775#endif
11776
11777#ifndef IEM_WITH_SETJMP
11778# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11780# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11782#else
11783# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11784 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11785# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11786 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11787#endif
11788
11789#ifndef IEM_WITH_SETJMP
11790# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11792# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11794#else
11795# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11796 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11797# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11798 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11799#endif
11800
11801
11802
11803#ifndef IEM_WITH_SETJMP
11804# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11805 do { \
11806 uint8_t u8Tmp; \
11807 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11808 (a_u16Dst) = u8Tmp; \
11809 } while (0)
11810# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11811 do { \
11812 uint8_t u8Tmp; \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11814 (a_u32Dst) = u8Tmp; \
11815 } while (0)
11816# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11817 do { \
11818 uint8_t u8Tmp; \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11820 (a_u64Dst) = u8Tmp; \
11821 } while (0)
11822# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11823 do { \
11824 uint16_t u16Tmp; \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11826 (a_u32Dst) = u16Tmp; \
11827 } while (0)
11828# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11829 do { \
11830 uint16_t u16Tmp; \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11832 (a_u64Dst) = u16Tmp; \
11833 } while (0)
11834# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11835 do { \
11836 uint32_t u32Tmp; \
11837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11838 (a_u64Dst) = u32Tmp; \
11839 } while (0)
11840#else /* IEM_WITH_SETJMP */
11841# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11848 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11849# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11851# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11852 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11853#endif /* IEM_WITH_SETJMP */
11854
11855#ifndef IEM_WITH_SETJMP
11856# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11857 do { \
11858 uint8_t u8Tmp; \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11860 (a_u16Dst) = (int8_t)u8Tmp; \
11861 } while (0)
11862# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11863 do { \
11864 uint8_t u8Tmp; \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11866 (a_u32Dst) = (int8_t)u8Tmp; \
11867 } while (0)
11868# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11869 do { \
11870 uint8_t u8Tmp; \
11871 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11872 (a_u64Dst) = (int8_t)u8Tmp; \
11873 } while (0)
11874# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11875 do { \
11876 uint16_t u16Tmp; \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11878 (a_u32Dst) = (int16_t)u16Tmp; \
11879 } while (0)
11880# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11881 do { \
11882 uint16_t u16Tmp; \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11884 (a_u64Dst) = (int16_t)u16Tmp; \
11885 } while (0)
11886# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11887 do { \
11888 uint32_t u32Tmp; \
11889 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11890 (a_u64Dst) = (int32_t)u32Tmp; \
11891 } while (0)
11892#else /* IEM_WITH_SETJMP */
11893# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11896 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11897# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11898 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11899# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11900 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11901# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11902 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11903# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11904 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11905#endif /* IEM_WITH_SETJMP */
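/* Illustrative sketch (not part of the original source): a MOVZX-style decoder
 * body combining the zero-extending fetch above with helpers defined elsewhere
 * in this file.  The opcode function name is hypothetical and only the memory
 * operand form is shown; a real decoder would also handle the register form. */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_movzx_Gv_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif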
11906
11907#ifndef IEM_WITH_SETJMP
11908# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11910# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11912# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11913 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11914# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11915 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11916#else
11917# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11918 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11919# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11920 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11921# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11922 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11923# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11924 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11925#endif
11926
11927#ifndef IEM_WITH_SETJMP
11928# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11929 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11930# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11932# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11934# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11936#else
11937# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11938 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11939# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11940 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11941# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11942 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11943# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11944 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11945#endif
11946
11947#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11948#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11949#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11950#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11951#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11952#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11953#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11954 do { \
11955 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11956 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11957 } while (0)
11958
11959#ifndef IEM_WITH_SETJMP
11960# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11961 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11962# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11963 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11964#else
11965# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11966 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11967# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11968 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11969#endif
11970
11971#ifndef IEM_WITH_SETJMP
11972# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11974# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11975 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11976#else
11977# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11978 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11979# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11980 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11981#endif
11982
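/* Illustrative sketch (not part of the original source): a MOVAPS-style store to
 * memory using IEM_MC_STORE_MEM_U128_ALIGN_SSE.  The XMM fetch and exception
 * check macros are defined elsewhere in this file; bRm is assumed to have been
 * fetched by the surrounding decoder. */
#if 0 /* example only */
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(RTUINT128U, u128Tmp);
IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
IEM_MC_STORE_MEM_U128_ALIGN_SSE(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u128Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif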
11983
11984#define IEM_MC_PUSH_U16(a_u16Value) \
11985 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11986#define IEM_MC_PUSH_U32(a_u32Value) \
11987 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11988#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11990#define IEM_MC_PUSH_U64(a_u64Value) \
11991 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11992
11993#define IEM_MC_POP_U16(a_pu16Value) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11995#define IEM_MC_POP_U32(a_pu32Value) \
11996 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11997#define IEM_MC_POP_U64(a_pu64Value) \
11998 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11999
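/* Illustrative sketch (not part of the original source): the 16-bit operand size
 * path of a hypothetical PUSH-register handler built on IEM_MC_PUSH_U16. */
#if 0 /* example only */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
IEM_MC_PUSH_U16(u16Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif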
12000/** Maps guest memory for direct or bounce buffered access.
12001 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12002 * @remarks May return.
12003 */
12004#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12005 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12006
12007/** Maps guest memory for direct or bounce buffered access.
12008 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12009 * @remarks May return.
12010 */
12011#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12012 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12013
12014/** Commits the memory and unmaps the guest memory.
12015 * @remarks May return.
12016 */
12017#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12018 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12019
12020/** Commits the memory and unmaps the guest memory unless the FPU status word
12021 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
12022 * exception that would prevent the store from taking place.
12023 *
12024 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12025 * store, while \#P will not.
12026 *
12027 * @remarks May in theory return - for now.
12028 */
12029#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12030 do { \
12031 if ( !(a_u16FSW & X86_FSW_ES) \
12032 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12033 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12034 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12035 } while (0)
12036
12037/** Calculates the effective address from the ModR/M byte. */
12038#ifndef IEM_WITH_SETJMP
12039# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12040 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12041#else
12042# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12043 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12044#endif
12045
12046#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12047#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12048#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12049#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12050#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12051#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12052#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12053
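/* Illustrative sketch (not part of the original source): how a read-modify-write
 * memory operand ties IEM_MC_MEM_MAP, an assembly worker call and
 * IEM_MC_MEM_COMMIT_AND_UNMAP together (ADD m32,r32 style, non-locked path).
 * bRm is assumed to have been fetched by the surrounding decoder. */
#if 0 /* example only */
IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
IEM_MC_ARG(uint32_t,        u32Src,          1);
IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif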
12054/**
12055 * Defers the rest of the instruction emulation to a C implementation routine
12056 * and returns, only taking the standard parameters.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12060 */
12061#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12062
12063/**
12064 * Defers the rest of the instruction emulation to a C implementation routine and
12065 * returns, taking one argument in addition to the standard ones.
12066 *
12067 * @param a_pfnCImpl The pointer to the C routine.
12068 * @param a0 The argument.
12069 */
12070#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12071
12072/**
12073 * Defers the rest of the instruction emulation to a C implementation routine
12074 * and returns, taking two arguments in addition to the standard ones.
12075 *
12076 * @param a_pfnCImpl The pointer to the C routine.
12077 * @param a0 The first extra argument.
12078 * @param a1 The second extra argument.
12079 */
12080#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12081
12082/**
12083 * Defers the rest of the instruction emulation to a C implementation routine
12084 * and returns, taking three arguments in addition to the standard ones.
12085 *
12086 * @param a_pfnCImpl The pointer to the C routine.
12087 * @param a0 The first extra argument.
12088 * @param a1 The second extra argument.
12089 * @param a2 The third extra argument.
12090 */
12091#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12092
12093/**
12094 * Defers the rest of the instruction emulation to a C implementation routine
12095 * and returns, taking four arguments in addition to the standard ones.
12096 *
12097 * @param a_pfnCImpl The pointer to the C routine.
12098 * @param a0 The first extra argument.
12099 * @param a1 The second extra argument.
12100 * @param a2 The third extra argument.
12101 * @param a3 The fourth extra argument.
12102 */
12103#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12104
12105/**
12106 * Defers the rest of the instruction emulation to a C implementation routine
12107 * and returns, taking five arguments in addition to the standard ones.
12108 *
12109 * @param a_pfnCImpl The pointer to the C routine.
12110 * @param a0 The first extra argument.
12111 * @param a1 The second extra argument.
12112 * @param a2 The third extra argument.
12113 * @param a3 The fourth extra argument.
12114 * @param a4 The fifth extra argument.
12115 */
12116#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12117
12118/**
12119 * Defers the entire instruction emulation to a C implementation routine and
12120 * returns, only taking the standard parameters.
12121 *
12122 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12123 *
12124 * @param a_pfnCImpl The pointer to the C routine.
12125 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12126 */
12127#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12128
12129/**
12130 * Defers the entire instruction emulation to a C implementation routine and
12131 * returns, taking one argument in addition to the standard ones.
12132 *
12133 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12134 *
12135 * @param a_pfnCImpl The pointer to the C routine.
12136 * @param a0 The argument.
12137 */
12138#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12139
12140/**
12141 * Defers the entire instruction emulation to a C implementation routine and
12142 * returns, taking two arguments in addition to the standard ones.
12143 *
12144 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12145 *
12146 * @param a_pfnCImpl The pointer to the C routine.
12147 * @param a0 The first extra argument.
12148 * @param a1 The second extra argument.
12149 */
12150#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12151
12152/**
12153 * Defers the entire instruction emulation to a C implementation routine and
12154 * returns, taking three arguments in addition to the standard ones.
12155 *
12156 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12157 *
12158 * @param a_pfnCImpl The pointer to the C routine.
12159 * @param a0 The first extra argument.
12160 * @param a1 The second extra argument.
12161 * @param a2 The third extra argument.
12162 */
12163#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12164
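/* Illustrative sketch (not part of the original source): an instruction that is
 * implemented entirely in C just defers to its CIMPL worker; HLT is the classic
 * case (the opcode function name here is hypothetical, iemCImpl_hlt is real). */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
#endif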
12165/**
12166 * Calls a FPU assembly implementation taking one visible argument.
12167 *
12168 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12169 * @param a0 The first extra argument.
12170 */
12171#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12172 do { \
12173 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12174 } while (0)
12175
12176/**
12177 * Calls a FPU assembly implementation taking two visible arguments.
12178 *
12179 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12180 * @param a0 The first extra argument.
12181 * @param a1 The second extra argument.
12182 */
12183#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12184 do { \
12185 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12186 } while (0)
12187
12188/**
12189 * Calls a FPU assembly implementation taking three visible arguments.
12190 *
12191 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12192 * @param a0 The first extra argument.
12193 * @param a1 The second extra argument.
12194 * @param a2 The third extra argument.
12195 */
12196#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12197 do { \
12198 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12199 } while (0)
12200
12201#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12202 do { \
12203 (a_FpuData).FSW = (a_FSW); \
12204 (a_FpuData).r80Result = *(a_pr80Value); \
12205 } while (0)
12206
12207/** Pushes FPU result onto the stack. */
12208#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12209 iemFpuPushResult(pVCpu, &a_FpuData)
12210/** Pushes FPU result onto the stack and sets the FPUDP. */
12211#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12212 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12213
12214/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12215#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12216 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12217
12218/** Stores FPU result in a stack register. */
12219#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12220 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12221/** Stores FPU result in a stack register and pops the stack. */
12222#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12223 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12224/** Stores FPU result in a stack register and sets the FPUDP. */
12225#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12226 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12227/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12228 * stack. */
12229#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12230 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12231
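/* Illustrative sketch (not part of the original source): the common shape of a
 * two-register FPU arithmetic instruction (FADD ST0,STi style), tying together
 * IEM_MC_PREPARE_FPU_USAGE, IEM_MC_CALL_FPU_AIMPL_3, the result store above and
 * the stack underflow macros below.  pfnAImpl and iStReg are stand-ins supplied
 * by the caller in the real helper. */
#if 0 /* example only */
IEM_MC_BEGIN(3, 1);
IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
    IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW(0);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif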
12232/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12233#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12234 iemFpuUpdateOpcodeAndIp(pVCpu)
12235/** Free a stack register (for FFREE and FFREEP). */
12236#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12237 iemFpuStackFree(pVCpu, a_iStReg)
12238/** Increment the FPU stack pointer. */
12239#define IEM_MC_FPU_STACK_INC_TOP() \
12240 iemFpuStackIncTop(pVCpu)
12241/** Decrement the FPU stack pointer. */
12242#define IEM_MC_FPU_STACK_DEC_TOP() \
12243 iemFpuStackDecTop(pVCpu)
12244
12245/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12246#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12247 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12248/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12249#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12250 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12251/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12252#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12253 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12254/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12255#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12256 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12257/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12258 * stack. */
12259#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12260 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12261/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12262#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12263 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12264
12265/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12266#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12267 iemFpuStackUnderflow(pVCpu, a_iStDst)
12268/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12269 * stack. */
12270#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12271 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12272/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12273 * FPUDS. */
12274#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12275 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12276/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12277 * FPUDS. Pops stack. */
12278#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12279 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12280/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12281 * stack twice. */
12282#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12283 iemFpuStackUnderflowThenPopPop(pVCpu)
12284/** Raises a FPU stack underflow exception for an instruction pushing a result
12285 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12286#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12287 iemFpuStackPushUnderflow(pVCpu)
12288/** Raises a FPU stack underflow exception for an instruction pushing a result
12289 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12290#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12291 iemFpuStackPushUnderflowTwo(pVCpu)
12292
12293/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12294 * FPUIP, FPUCS and FOP. */
12295#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12296 iemFpuStackPushOverflow(pVCpu)
12297/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12298 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12299#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12300 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12301/** Prepares for using the FPU state.
12302 * Ensures that we can use the host FPU in the current context (RC+R0).
12303 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12304#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12305/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12306#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12307/** Actualizes the guest FPU state so it can be accessed and modified. */
12308#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12309
12310/** Prepares for using the SSE state.
12311 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12312 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12313#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12314/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12315#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12316/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12317#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12318
12319/** Prepares for using the AVX state.
12320 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12321 * Ensures the guest AVX state in the CPUMCTX is up to date.
12322 * @note This will include the AVX512 state too when support for it is added
12323 * due to the zero-extending behaviour of VEX instructions. */
12324#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12325/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12326#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12327/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12328#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12329
12330/**
12331 * Calls a MMX assembly implementation taking two visible arguments.
12332 *
12333 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12334 * @param a0 The first extra argument.
12335 * @param a1 The second extra argument.
12336 */
12337#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12338 do { \
12339 IEM_MC_PREPARE_FPU_USAGE(); \
12340 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12341 } while (0)
12342
12343/**
12344 * Calls a MMX assembly implementation taking three visible arguments.
12345 *
12346 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12347 * @param a0 The first extra argument.
12348 * @param a1 The second extra argument.
12349 * @param a2 The third extra argument.
12350 */
12351#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12352 do { \
12353 IEM_MC_PREPARE_FPU_USAGE(); \
12354 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12355 } while (0)
12356
12357
12358/**
12359 * Calls a SSE assembly implementation taking two visible arguments.
12360 *
12361 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12362 * @param a0 The first extra argument.
12363 * @param a1 The second extra argument.
12364 */
12365#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12366 do { \
12367 IEM_MC_PREPARE_SSE_USAGE(); \
12368 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12369 } while (0)
12370
12371/**
12372 * Calls a SSE assembly implementation taking three visible arguments.
12373 *
12374 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12375 * @param a0 The first extra argument.
12376 * @param a1 The second extra argument.
12377 * @param a2 The third extra argument.
12378 */
12379#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12380 do { \
12381 IEM_MC_PREPARE_SSE_USAGE(); \
12382 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12383 } while (0)
12384
12385
12386/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12387 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12388#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12389 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12390
12391/**
12392 * Calls an AVX assembly implementation taking two visible arguments.
12393 *
12394 * There is one implicit zeroth argument, a pointer to the extended state.
12395 *
12396 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12397 * @param a1 The first extra argument.
12398 * @param a2 The second extra argument.
12399 */
12400#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12401 do { \
12402 IEM_MC_PREPARE_AVX_USAGE(); \
12403 a_pfnAImpl(pXState, (a1), (a2)); \
12404 } while (0)
12405
12406/**
12407 * Calls an AVX assembly implementation taking three visible arguments.
12408 *
12409 * There is one implicit zeroth argument, a pointer to the extended state.
12410 *
12411 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12412 * @param a1 The first extra argument.
12413 * @param a2 The second extra argument.
12414 * @param a3 The third extra argument.
12415 */
12416#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12417 do { \
12418 IEM_MC_PREPARE_AVX_USAGE(); \
12419 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12420 } while (0)
12421
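/* Illustrative sketch (not part of the original source): how the implicit zeroth
 * argument and IEM_MC_CALL_AVX_AIMPL_2 fit together.  The worker name
 * iemAImpl_exampleAvxWorker and the fixed register indexes are hypothetical;
 * real decoders derive the indexes from the VEX and ModR/M fields. */
#if 0 /* example only */
IEM_MC_BEGIN(3, 0);
IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                  /* declares pXState as argument 0 */
IEM_MC_ARG_CONST(uint8_t, iYRegDst, 0 /*reg*/, 1);
IEM_MC_ARG_CONST(uint8_t, iYRegSrc, 1 /*reg*/, 2);
IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_exampleAvxWorker, iYRegDst, iYRegSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif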
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12428/** @note Not for IOPL or IF testing. */
12429#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12430/** @note Not for IOPL or IF testing. */
12431#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12432 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12433 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12434/** @note Not for IOPL or IF testing. */
12435#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12436 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12437 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12438/** @note Not for IOPL or IF testing. */
12439#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12440 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12441 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12442 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12443/** @note Not for IOPL or IF testing. */
12444#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12445 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12446 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12447 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12448#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12449#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12450#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12451/** @note Not for IOPL or IF testing. */
12452#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12453 if ( pVCpu->cpum.GstCtx.cx != 0 \
12454 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12455/** @note Not for IOPL or IF testing. */
12456#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12457 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12458 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12459/** @note Not for IOPL or IF testing. */
12460#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12461 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12462 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12463/** @note Not for IOPL or IF testing. */
12464#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12465 if ( pVCpu->cpum.GstCtx.cx != 0 \
12466 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12467/** @note Not for IOPL or IF testing. */
12468#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12469 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12470 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12471/** @note Not for IOPL or IF testing. */
12472#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12473 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12474 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12475#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12476#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12477
12478#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12479 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12480#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12481 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12482#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12483 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12484#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12485 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12486#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12487 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12488#define IEM_MC_IF_FCW_IM() \
12489 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12490
12491#define IEM_MC_ELSE() } else {
12492#define IEM_MC_ENDIF() } do {} while (0)
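/* Illustrative sketch (not part of the original source): the IF/ELSE/ENDIF
 * macros above expand to plain C if/else blocks, as in this Jcc-style handler
 * (the function name is hypothetical, the pattern follows the real Jcc code). */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_je_Jb)
{
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif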
12493
12494/** @} */
12495
12496
12497/** @name Opcode Debug Helpers.
12498 * @{
12499 */
12500#ifdef VBOX_WITH_STATISTICS
12501# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12502#else
12503# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12504#endif
12505
12506#ifdef DEBUG
12507# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12508 do { \
12509 IEMOP_INC_STATS(a_Stats); \
12510 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12511 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12512 } while (0)
12513
12514# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12515 do { \
12516 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12517 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12518 (void)RT_CONCAT(OP_,a_Upper); \
12519 (void)(a_fDisHints); \
12520 (void)(a_fIemHints); \
12521 } while (0)
12522
12523# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12524 do { \
12525 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12526 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12527 (void)RT_CONCAT(OP_,a_Upper); \
12528 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12529 (void)(a_fDisHints); \
12530 (void)(a_fIemHints); \
12531 } while (0)
12532
12533# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12534 do { \
12535 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12536 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12537 (void)RT_CONCAT(OP_,a_Upper); \
12538 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12540 (void)(a_fDisHints); \
12541 (void)(a_fIemHints); \
12542 } while (0)
12543
12544# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12545 do { \
12546 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12547 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12548 (void)RT_CONCAT(OP_,a_Upper); \
12549 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12550 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12551 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12552 (void)(a_fDisHints); \
12553 (void)(a_fIemHints); \
12554 } while (0)
12555
12556# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12557 do { \
12558 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12559 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12560 (void)RT_CONCAT(OP_,a_Upper); \
12561 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12562 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12563 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12564 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12565 (void)(a_fDisHints); \
12566 (void)(a_fIemHints); \
12567 } while (0)
12568
12569#else
12570# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12571
12572# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12574# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12575 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12576# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12577 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12578# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12579 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12580# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12581 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12582
12583#endif
12584
12585#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12586 IEMOP_MNEMONIC0EX(a_Lower, \
12587 #a_Lower, \
12588 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12589#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12590 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12591 #a_Lower " " #a_Op1, \
12592 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12593#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12594 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12595 #a_Lower " " #a_Op1 "," #a_Op2, \
12596 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12597#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12598 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12599 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12600 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12601#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12602 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12603 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12604 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
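/* Illustrative sketch (not part of the original source): a decoder body
 * typically opens with one of these wrappers, e.g. for a two-operand
 * register/memory form (the DIS hints a given instruction passes may differ): */
#if 0 /* example only */
IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
#endif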
12605
12606/** @} */
12607
12608
12609/** @name Opcode Helpers.
12610 * @{
12611 */
12612
12613#ifdef IN_RING3
12614# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12615 do { \
12616 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12617 else \
12618 { \
12619 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12620 return IEMOP_RAISE_INVALID_OPCODE(); \
12621 } \
12622 } while (0)
12623#else
12624# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12625 do { \
12626 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12627 else return IEMOP_RAISE_INVALID_OPCODE(); \
12628 } while (0)
12629#endif
12630
12631/** The instruction requires a 186 or later. */
12632#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12633# define IEMOP_HLP_MIN_186() do { } while (0)
12634#else
12635# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12636#endif
12637
12638/** The instruction requires a 286 or later. */
12639#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12640# define IEMOP_HLP_MIN_286() do { } while (0)
12641#else
12642# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12643#endif
12644
12645/** The instruction requires a 386 or later. */
12646#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12647# define IEMOP_HLP_MIN_386() do { } while (0)
12648#else
12649# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12650#endif
12651
12652/** The instruction requires a 386 or later if the given expression is true. */
12653#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12654# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12655#else
12656# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12657#endif
12658
12659/** The instruction requires a 486 or later. */
12660#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12661# define IEMOP_HLP_MIN_486() do { } while (0)
12662#else
12663# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12664#endif
12665
12666/** The instruction requires a Pentium (586) or later. */
12667#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12668# define IEMOP_HLP_MIN_586() do { } while (0)
12669#else
12670# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12671#endif
12672
12673/** The instruction requires a PentiumPro (686) or later. */
12674#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12675# define IEMOP_HLP_MIN_686() do { } while (0)
12676#else
12677# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12678#endif
12679
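/* Illustrative sketch (not part of the original source): a decoder for a 386+
 * instruction raises #UD on older target CPUs before doing anything else.  Both
 * the opcode function and the CIMPL worker names here are hypothetical. */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_386_insn)
{
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example_386_insn);
}
#endif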
12680
12681/** The instruction raises an \#UD in real and V8086 mode. */
12682#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12683 do \
12684 { \
12685 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12686 else return IEMOP_RAISE_INVALID_OPCODE(); \
12687 } while (0)
12688
12689#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12690/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12691 * without a 64-bit code segment (applicable to all VMX instructions except
12692 * VMCALL).
12693 */
12694#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12695 do \
12696 { \
12697 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12698 && ( !IEM_IS_LONG_MODE(pVCpu) \
12699 || IEM_IS_64BIT_CODE(pVCpu))) \
12700 { /* likely */ } \
12701 else \
12702 { \
12703 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12704 { \
12705 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12706 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12707 return IEMOP_RAISE_INVALID_OPCODE(); \
12708 } \
12709 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12710 { \
12711 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12712 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12713 return IEMOP_RAISE_INVALID_OPCODE(); \
12714 } \
12715 } \
12716 } while (0)
12717
12718/** The instruction can only be executed in VMX operation (VMX root mode and
12719 * non-root mode).
12720 *
12721 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12722 */
12723# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12724 do \
12725 { \
12726 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12727 else \
12728 { \
12729 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12730 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12731 return IEMOP_RAISE_INVALID_OPCODE(); \
12732 } \
12733 } while (0)
12734#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12735
12736/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12737 * 64-bit mode. */
12738#define IEMOP_HLP_NO_64BIT() \
12739 do \
12740 { \
12741 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12742 return IEMOP_RAISE_INVALID_OPCODE(); \
12743 } while (0)
12744
12745/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12746 * 64-bit mode. */
12747#define IEMOP_HLP_ONLY_64BIT() \
12748 do \
12749 { \
12750 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12751 return IEMOP_RAISE_INVALID_OPCODE(); \
12752 } while (0)
12753
12754/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
12755#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12756 do \
12757 { \
12758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12759 iemRecalEffOpSize64Default(pVCpu); \
12760 } while (0)
12761
12762/** The instruction has 64-bit operand size if in 64-bit mode. */
12763#define IEMOP_HLP_64BIT_OP_SIZE() \
12764 do \
12765 { \
12766 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12767 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12768 } while (0)
12769
12770/** Only a REX prefix immediately preceding the first opcode byte takes
12771 * effect. This macro helps ensure this, as well as logging bad guest code. */
12772#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12773 do \
12774 { \
12775 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12776 { \
12777 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12778 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12779 pVCpu->iem.s.uRexB = 0; \
12780 pVCpu->iem.s.uRexIndex = 0; \
12781 pVCpu->iem.s.uRexReg = 0; \
12782 iemRecalEffOpSize(pVCpu); \
12783 } \
12784 } while (0)
12785
12786/**
12787 * Done decoding.
12788 */
12789#define IEMOP_HLP_DONE_DECODING() \
12790 do \
12791 { \
12792 /*nothing for now, maybe later... */ \
12793 } while (0)
12794
12795/**
12796 * Done decoding, raise \#UD exception if lock prefix present.
12797 */
12798#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12799 do \
12800 { \
12801 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12802 { /* likely */ } \
12803 else \
12804 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12805 } while (0)
12806
12807
12808/**
12809 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12810 * repnz or size prefixes are present, or if in real or v8086 mode.
12811 */
12812#define IEMOP_HLP_DONE_VEX_DECODING() \
12813 do \
12814 { \
12815 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12816 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12817 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12818 { /* likely */ } \
12819 else \
12820 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12821 } while (0)
12822
12823/**
12824 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12825 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12826 */
12827#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12828 do \
12829 { \
12830 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12831 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12832 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12833 && pVCpu->iem.s.uVexLength == 0)) \
12834 { /* likely */ } \
12835 else \
12836 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12837 } while (0)
12838
12839
12840/**
12841 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12842 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12843 * register 0, or if in real or v8086 mode.
12844 */
12845#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12846 do \
12847 { \
12848 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12849 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12850 && !pVCpu->iem.s.uVex3rdReg \
12851 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12852 { /* likely */ } \
12853 else \
12854 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12855 } while (0)
12856
12857/**
12858 * Done decoding VEX, no V, L=0.
12859 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12860 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12861 */
12862#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12863 do \
12864 { \
12865 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12866 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12867 && pVCpu->iem.s.uVexLength == 0 \
12868 && pVCpu->iem.s.uVex3rdReg == 0 \
12869 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12870 { /* likely */ } \
12871 else \
12872 return IEMOP_RAISE_INVALID_OPCODE(); \
12873 } while (0)
12874
12875#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12876 do \
12877 { \
12878 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12879 { /* likely */ } \
12880 else \
12881 { \
12882 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12883 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12884 } \
12885 } while (0)
12886#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12887 do \
12888 { \
12889 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12890 { /* likely */ } \
12891 else \
12892 { \
12893 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12894 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12895 } \
12896 } while (0)
12897
12898/**
12899 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12900 * are present.
12901 */
12902#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12903 do \
12904 { \
12905 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12906 { /* likely */ } \
12907 else \
12908 return IEMOP_RAISE_INVALID_OPCODE(); \
12909 } while (0)
12910
12911/**
12912 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12913 * prefixes are present.
12914 */
12915#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12916 do \
12917 { \
12918 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12919 { /* likely */ } \
12920 else \
12921 return IEMOP_RAISE_INVALID_OPCODE(); \
12922 } while (0)
12923
12924
12925/**
12926 * Calculates the effective address of a ModR/M memory operand.
12927 *
12928 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12929 *
12930 * @return Strict VBox status code.
12931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12932 * @param bRm The ModRM byte.
12933 * @param cbImm The size of any immediate following the
12934 * effective address opcode bytes. Important for
12935 * RIP relative addressing.
12936 * @param pGCPtrEff Where to return the effective address.
12937 */
12938IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12939{
12940 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12941# define SET_SS_DEF() \
12942 do \
12943 { \
12944 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12945 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12946 } while (0)
12947
12948 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12949 {
12950/** @todo Check the effective address size crap! */
12951 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12952 {
12953 uint16_t u16EffAddr;
12954
12955 /* Handle the disp16 form with no registers first. */
12956 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12957 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12958 else
12959 {
12960 /* Get the displacement. */
12961 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12962 {
12963 case 0: u16EffAddr = 0; break;
12964 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12965 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12966 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12967 }
12968
12969 /* Add the base and index registers to the disp. */
12970 switch (bRm & X86_MODRM_RM_MASK)
12971 {
12972 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12973 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12974 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12975 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12976 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12977 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12978 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12979 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12980 }
12981 }
12982
12983 *pGCPtrEff = u16EffAddr;
12984 }
12985 else
12986 {
12987 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12988 uint32_t u32EffAddr;
12989
12990 /* Handle the disp32 form with no registers first. */
12991 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12992 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12993 else
12994 {
12995 /* Get the register (or SIB) value. */
12996 switch ((bRm & X86_MODRM_RM_MASK))
12997 {
12998 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12999 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13000 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13001 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13002 case 4: /* SIB */
13003 {
13004 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13005
13006 /* Get the index and scale it. */
13007 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13008 {
13009 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13010 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13011 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13012 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13013 case 4: u32EffAddr = 0; /*none */ break;
13014 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13015 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13016 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13017 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13018 }
13019 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13020
13021 /* add base */
13022 switch (bSib & X86_SIB_BASE_MASK)
13023 {
13024 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13025 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13026 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13027 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13028 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13029 case 5:
13030 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13031 {
13032 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13033 SET_SS_DEF();
13034 }
13035 else
13036 {
13037 uint32_t u32Disp;
13038 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13039 u32EffAddr += u32Disp;
13040 }
13041 break;
13042 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13043 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13045 }
13046 break;
13047 }
13048 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13049 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13050 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13051 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13052 }
13053
13054 /* Get and add the displacement. */
13055 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13056 {
13057 case 0:
13058 break;
13059 case 1:
13060 {
13061 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13062 u32EffAddr += i8Disp;
13063 break;
13064 }
13065 case 2:
13066 {
13067 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13068 u32EffAddr += u32Disp;
13069 break;
13070 }
13071 default:
13072 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13073 }
13074
13075 }
13076 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13077 *pGCPtrEff = u32EffAddr;
13078 else
13079 {
13080 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13081 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13082 }
13083 }
13084 }
13085 else
13086 {
13087 uint64_t u64EffAddr;
13088
13089 /* Handle the rip+disp32 form with no registers first. */
13090 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13091 {
13092 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13093 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13094 }
13095 else
13096 {
13097 /* Get the register (or SIB) value. */
13098 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13099 {
13100 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13101 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13102 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13103 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13104 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13105 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13106 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13107 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13108 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13109 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13110 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13111 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13112 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13113 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13114 /* SIB */
13115 case 4:
13116 case 12:
13117 {
13118 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13119
13120 /* Get the index and scale it. */
13121 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13122 {
13123 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13124 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13125 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13126 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13127 case 4: u64EffAddr = 0; /*none */ break;
13128 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13129 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13130 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13131 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13132 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13133 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13134 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13135 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13136 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13137 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13138 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13139 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13140 }
13141 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13142
13143 /* add base */
13144 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13145 {
13146 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13147 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13148 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13149 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13150 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13151 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13152 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13153 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13154 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13155 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13156 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13157 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13158 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13159 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13160 /* complicated encodings */
13161 case 5:
13162 case 13:
13163 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13164 {
13165 if (!pVCpu->iem.s.uRexB)
13166 {
13167 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13168 SET_SS_DEF();
13169 }
13170 else
13171 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13172 }
13173 else
13174 {
13175 uint32_t u32Disp;
13176 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13177 u64EffAddr += (int32_t)u32Disp;
13178 }
13179 break;
13180 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13181 }
13182 break;
13183 }
13184 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13185 }
13186
13187 /* Get and add the displacement. */
13188 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13189 {
13190 case 0:
13191 break;
13192 case 1:
13193 {
13194 int8_t i8Disp;
13195 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13196 u64EffAddr += i8Disp;
13197 break;
13198 }
13199 case 2:
13200 {
13201 uint32_t u32Disp;
13202 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13203 u64EffAddr += (int32_t)u32Disp;
13204 break;
13205 }
13206 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13207 }
13208
13209 }
13210
13211 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13212 *pGCPtrEff = u64EffAddr;
13213 else
13214 {
13215 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13216 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13217 }
13218 }
13219
13220 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13221 return VINF_SUCCESS;
13222}
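/*
 * Worked decoding example for the 32-bit ModR/M + SIB path above (illustrative only):
 *
 *      mov eax, [ebx + esi*4 + 0x10]
 *          ModR/M = 0x44: mod=01 (disp8 follows), reg=000 (eax), rm=100 (SIB byte follows)
 *          SIB    = 0xb3: scale=10 (*4), index=110 (esi), base=011 (ebx)
 *          disp8  = 0x10
 *      => u32EffAddr = (esi << 2) + ebx + 0x10, with DS as the default segment.
 *
 * In 64-bit mode the same pattern applies, with uRexB/uRexIndex extending the register
 * numbers and mod=00,rm=101 selecting RIP-relative addressing instead.
 */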
13223
13224
13225/**
13226 * Calculates the effective address of a ModR/M memory operand.
13227 *
13228 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13229 *
13230 * @return Strict VBox status code.
13231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13232 * @param bRm The ModRM byte.
13233 * @param cbImm The size of any immediate following the
13234 * effective address opcode bytes. Important for
13235 * RIP relative addressing.
13236 * @param pGCPtrEff Where to return the effective address.
13237 * @param pGCPtrEff Where to return the effective address.
13237 * @param offRsp The displacement added to RSP when the SIB base register is (E/R)SP.
13238 */
13239IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13240{
13241 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13242# define SET_SS_DEF() \
13243 do \
13244 { \
13245 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13246 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13247 } while (0)
13248
13249 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13250 {
13251/** @todo Check the effective address size crap! */
13252 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13253 {
13254 uint16_t u16EffAddr;
13255
13256 /* Handle the disp16 form with no registers first. */
13257 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13258 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13259 else
13260 {
13261 /* Get the displacement. */
13262 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13263 {
13264 case 0: u16EffAddr = 0; break;
13265 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13266 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13267 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13268 }
13269
13270 /* Add the base and index registers to the disp. */
13271 switch (bRm & X86_MODRM_RM_MASK)
13272 {
13273 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13274 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13275 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13276 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13277 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13278 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13279 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13280 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13281 }
13282 }
13283
13284 *pGCPtrEff = u16EffAddr;
13285 }
13286 else
13287 {
13288 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13289 uint32_t u32EffAddr;
13290
13291 /* Handle the disp32 form with no registers first. */
13292 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13293 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13294 else
13295 {
13296 /* Get the register (or SIB) value. */
13297 switch ((bRm & X86_MODRM_RM_MASK))
13298 {
13299 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13300 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13301 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13302 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13303 case 4: /* SIB */
13304 {
13305 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13306
13307 /* Get the index and scale it. */
13308 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13309 {
13310 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13311 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13312 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13313 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13314 case 4: u32EffAddr = 0; /*none */ break;
13315 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13316 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13317 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13319 }
13320 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13321
13322 /* add base */
13323 switch (bSib & X86_SIB_BASE_MASK)
13324 {
13325 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13326 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13327 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13328 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13329 case 4:
13330 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13331 SET_SS_DEF();
13332 break;
13333 case 5:
13334 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13335 {
13336 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13337 SET_SS_DEF();
13338 }
13339 else
13340 {
13341 uint32_t u32Disp;
13342 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13343 u32EffAddr += u32Disp;
13344 }
13345 break;
13346 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13347 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13349 }
13350 break;
13351 }
13352 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13353 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13354 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13355 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13356 }
13357
13358 /* Get and add the displacement. */
13359 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13360 {
13361 case 0:
13362 break;
13363 case 1:
13364 {
13365 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13366 u32EffAddr += i8Disp;
13367 break;
13368 }
13369 case 2:
13370 {
13371 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13372 u32EffAddr += u32Disp;
13373 break;
13374 }
13375 default:
13376 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13377 }
13378
13379 }
13380 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13381 *pGCPtrEff = u32EffAddr;
13382 else
13383 {
13384 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13385 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13386 }
13387 }
13388 }
13389 else
13390 {
13391 uint64_t u64EffAddr;
13392
13393 /* Handle the rip+disp32 form with no registers first. */
13394 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13395 {
13396 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13397 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13398 }
13399 else
13400 {
13401 /* Get the register (or SIB) value. */
13402 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13403 {
13404 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13405 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13406 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13407 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13408 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13409 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13410 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13411 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13412 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13413 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13414 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13415 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13416 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13417 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13418 /* SIB */
13419 case 4:
13420 case 12:
13421 {
13422 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13423
13424 /* Get the index and scale it. */
13425 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13426 {
13427 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13428 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13429 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13430 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13431 case 4: u64EffAddr = 0; /*none */ break;
13432 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13433 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13434 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13435 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13436 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13437 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13438 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13439 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13440 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13441 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13442 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13444 }
13445 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13446
13447 /* add base */
13448 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13449 {
13450 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13451 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13452 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13453 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13454 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13455 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13456 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13457 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13458 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13459 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13460 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13461 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13462 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13463 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13464 /* complicated encodings */
13465 case 5:
13466 case 13:
13467 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13468 {
13469 if (!pVCpu->iem.s.uRexB)
13470 {
13471 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13472 SET_SS_DEF();
13473 }
13474 else
13475 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13476 }
13477 else
13478 {
13479 uint32_t u32Disp;
13480 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13481 u64EffAddr += (int32_t)u32Disp;
13482 }
13483 break;
13484 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13485 }
13486 break;
13487 }
13488 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13489 }
13490
13491 /* Get and add the displacement. */
13492 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13493 {
13494 case 0:
13495 break;
13496 case 1:
13497 {
13498 int8_t i8Disp;
13499 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13500 u64EffAddr += i8Disp;
13501 break;
13502 }
13503 case 2:
13504 {
13505 uint32_t u32Disp;
13506 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13507 u64EffAddr += (int32_t)u32Disp;
13508 break;
13509 }
13510 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13511 }
13512
13513 }
13514
13515 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13516 *pGCPtrEff = u64EffAddr;
13517 else
13518 {
13519 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13520 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13521 }
13522 }
13523
13524 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13525 return VINF_SUCCESS;
13526}
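/*
 * Note: iemOpHlpCalcRmEffAddrEx above differs from iemOpHlpCalcRmEffAddr only in adding the
 *       offRsp displacement when the SIB base register is (E/R)SP; the setjmp variant below
 *       reports internal decoding errors via longjmp instead of a strict status code.
 */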
13527
13528
13529#ifdef IEM_WITH_SETJMP
13530/**
13531 * Calculates the effective address of a ModR/M memory operand.
13532 *
13533 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13534 *
13535 * May longjmp on internal error.
13536 *
13537 * @return The effective address.
13538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13539 * @param bRm The ModRM byte.
13540 * @param cbImm The size of any immediate following the
13541 * effective address opcode bytes. Important for
13542 * RIP relative addressing.
13543 */
13544IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13545{
13546 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13547# define SET_SS_DEF() \
13548 do \
13549 { \
13550 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13551 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13552 } while (0)
13553
13554 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13555 {
13556/** @todo Check the effective address size crap! */
13557 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13558 {
13559 uint16_t u16EffAddr;
13560
13561 /* Handle the disp16 form with no registers first. */
13562 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13563 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13564 else
13565 {
13566 /* Get the displacement. */
13567 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13568 {
13569 case 0: u16EffAddr = 0; break;
13570 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13571 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13572 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13573 }
13574
13575 /* Add the base and index registers to the disp. */
13576 switch (bRm & X86_MODRM_RM_MASK)
13577 {
13578 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13579 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13580 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13581 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13582 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13583 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13584 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13585 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13586 }
13587 }
13588
13589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13590 return u16EffAddr;
13591 }
13592
13593 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13594 uint32_t u32EffAddr;
13595
13596 /* Handle the disp32 form with no registers first. */
13597 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13598 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13599 else
13600 {
13601 /* Get the register (or SIB) value. */
13602 switch ((bRm & X86_MODRM_RM_MASK))
13603 {
13604 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13605 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13606 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13607 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13608 case 4: /* SIB */
13609 {
13610 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13611
13612 /* Get the index and scale it. */
13613 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13614 {
13615 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13616 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13617 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13618 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13619 case 4: u32EffAddr = 0; /*none */ break;
13620 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13621 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13622 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13623 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13624 }
13625 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13626
13627 /* add base */
13628 switch (bSib & X86_SIB_BASE_MASK)
13629 {
13630 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13631 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13632 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13633 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13634 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13635 case 5:
13636 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13637 {
13638 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13639 SET_SS_DEF();
13640 }
13641 else
13642 {
13643 uint32_t u32Disp;
13644 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13645 u32EffAddr += u32Disp;
13646 }
13647 break;
13648 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13649 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13650 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13651 }
13652 break;
13653 }
13654 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13655 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13656 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13657 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13658 }
13659
13660 /* Get and add the displacement. */
13661 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13662 {
13663 case 0:
13664 break;
13665 case 1:
13666 {
13667 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13668 u32EffAddr += i8Disp;
13669 break;
13670 }
13671 case 2:
13672 {
13673 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13674 u32EffAddr += u32Disp;
13675 break;
13676 }
13677 default:
13678 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13679 }
13680 }
13681
13682 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13683 {
13684 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13685 return u32EffAddr;
13686 }
13687 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13688 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13689 return u32EffAddr & UINT16_MAX;
13690 }
13691
13692 uint64_t u64EffAddr;
13693
13694 /* Handle the rip+disp32 form with no registers first. */
13695 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13696 {
13697 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13698 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13699 }
13700 else
13701 {
13702 /* Get the register (or SIB) value. */
13703 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13704 {
13705 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13706 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13707 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13708 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13709 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13710 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13711 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13712 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13713 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13714 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13715 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13716 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13717 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13718 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13719 /* SIB */
13720 case 4:
13721 case 12:
13722 {
13723 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13724
13725 /* Get the index and scale it. */
13726 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13727 {
13728 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13729 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13730 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13731 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13732 case 4: u64EffAddr = 0; /*none */ break;
13733 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13734 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13735 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13736 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13737 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13738 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13739 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13740 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13741 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13742 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13743 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13744 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13745 }
13746 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13747
13748 /* add base */
13749 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13750 {
13751 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13752 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13753 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13754 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13755 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13756 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13757 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13758 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13759 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13760 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13761 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13762 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13763 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13764 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13765 /* complicated encodings */
13766 case 5:
13767 case 13:
13768 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13769 {
13770 if (!pVCpu->iem.s.uRexB)
13771 {
13772 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13773 SET_SS_DEF();
13774 }
13775 else
13776 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13777 }
13778 else
13779 {
13780 uint32_t u32Disp;
13781 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13782 u64EffAddr += (int32_t)u32Disp;
13783 }
13784 break;
13785 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13786 }
13787 break;
13788 }
13789 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13790 }
13791
13792 /* Get and add the displacement. */
13793 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13794 {
13795 case 0:
13796 break;
13797 case 1:
13798 {
13799 int8_t i8Disp;
13800 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13801 u64EffAddr += i8Disp;
13802 break;
13803 }
13804 case 2:
13805 {
13806 uint32_t u32Disp;
13807 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13808 u64EffAddr += (int32_t)u32Disp;
13809 break;
13810 }
13811 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13812 }
13813
13814 }
13815
13816 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13817 {
13818 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13819 return u64EffAddr;
13820 }
13821 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13822 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13823 return u64EffAddr & UINT32_MAX;
13824}
13825#endif /* IEM_WITH_SETJMP */
13826
13827/** @} */
13828
13829
13830
13831/*
13832 * Include the instructions
13833 */
13834#include "IEMAllInstructions.cpp.h"
13835
13836
13837
13838#ifdef LOG_ENABLED
13839/**
13840 * Logs the current instruction.
13841 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13842 * @param fSameCtx Set if we have the same context information as the VMM,
13843 * clear if we may have already executed an instruction in
13844 * our debug context. When clear, we assume IEMCPU holds
13845 * valid CPU mode info.
13846 *
13847 * The @a fSameCtx parameter is now misleading and obsolete.
13848 * @param pszFunction The IEM function doing the execution.
13849 */
13850IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13851{
13852# ifdef IN_RING3
13853 if (LogIs2Enabled())
13854 {
13855 char szInstr[256];
13856 uint32_t cbInstr = 0;
13857 if (fSameCtx)
13858 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13859 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13860 szInstr, sizeof(szInstr), &cbInstr);
13861 else
13862 {
13863 uint32_t fFlags = 0;
13864 switch (pVCpu->iem.s.enmCpuMode)
13865 {
13866 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13867 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13868 case IEMMODE_16BIT:
13869 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13870 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13871 else
13872 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13873 break;
13874 }
13875 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13876 szInstr, sizeof(szInstr), &cbInstr);
13877 }
13878
13879 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13880 Log2(("**** %s\n"
13881 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13882 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13883 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13884 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13885 " %s\n"
13886 , pszFunction,
13887 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13888 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13889 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13890 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13891 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13892 szInstr));
13893
13894 if (LogIs3Enabled())
13895 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13896 }
13897 else
13898# endif
13899 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13900 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13901 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13902}
13903#endif /* LOG_ENABLED */
13904
13905
13906/**
13907 * Makes status code adjustments (pass up from I/O and access handlers)
13908 * as well as maintaining statistics.
13909 *
13910 * @returns Strict VBox status code to pass up.
13911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13912 * @param rcStrict The status from executing an instruction.
13913 */
13914DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13915{
13916 if (rcStrict != VINF_SUCCESS)
13917 {
13918 if (RT_SUCCESS(rcStrict))
13919 {
13920 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13921 || rcStrict == VINF_IOM_R3_IOPORT_READ
13922 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13923 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13924 || rcStrict == VINF_IOM_R3_MMIO_READ
13925 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13926 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13927 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13928 || rcStrict == VINF_CPUM_R3_MSR_READ
13929 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13930 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13931 || rcStrict == VINF_EM_RAW_TO_R3
13932 || rcStrict == VINF_EM_TRIPLE_FAULT
13933 || rcStrict == VINF_GIM_R3_HYPERCALL
13934 /* raw-mode / virt handlers only: */
13935 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13936 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13937 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13938 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13939 || rcStrict == VINF_SELM_SYNC_GDT
13940 || rcStrict == VINF_CSAM_PENDING_ACTION
13941 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13942 /* nested hw.virt codes: */
13943 || rcStrict == VINF_VMX_VMEXIT
13944 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13945 || rcStrict == VINF_SVM_VMEXIT
13946 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13947/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13948 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
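            /* Decide whether a deferred pass-up status should replace the current informational
               status: a pass-up outside the VINF_EM range always wins, while within that range
               the numerically lower value takes precedence; otherwise the request is merely
               counted. */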
13949#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13950 if ( rcStrict == VINF_VMX_VMEXIT
13951 && rcPassUp == VINF_SUCCESS)
13952 rcStrict = VINF_SUCCESS;
13953 else
13954#endif
13955#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13956 if ( rcStrict == VINF_SVM_VMEXIT
13957 && rcPassUp == VINF_SUCCESS)
13958 rcStrict = VINF_SUCCESS;
13959 else
13960#endif
13961 if (rcPassUp == VINF_SUCCESS)
13962 pVCpu->iem.s.cRetInfStatuses++;
13963 else if ( rcPassUp < VINF_EM_FIRST
13964 || rcPassUp > VINF_EM_LAST
13965 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13966 {
13967 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13968 pVCpu->iem.s.cRetPassUpStatus++;
13969 rcStrict = rcPassUp;
13970 }
13971 else
13972 {
13973 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13974 pVCpu->iem.s.cRetInfStatuses++;
13975 }
13976 }
13977 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13978 pVCpu->iem.s.cRetAspectNotImplemented++;
13979 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13980 pVCpu->iem.s.cRetInstrNotImplemented++;
13981 else
13982 pVCpu->iem.s.cRetErrStatuses++;
13983 }
13984 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13985 {
13986 pVCpu->iem.s.cRetPassUpStatus++;
13987 rcStrict = pVCpu->iem.s.rcPassUp;
13988 }
13989
13990 return rcStrict;
13991}
13992
13993
13994/**
13995 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13996 * IEMExecOneWithPrefetchedByPC.
13997 *
13998 * Similar code is found in IEMExecLots.
13999 *
14000 * @return Strict VBox status code.
14001 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14002 * @param fExecuteInhibit If set, execute the instruction following CLI,
14003 * POP SS and MOV SS,GR.
14004 * @param pszFunction The calling function name.
14005 */
14006DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14007{
14008 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14009 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14010 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14011 RT_NOREF_PV(pszFunction);
14012
14013#ifdef IEM_WITH_SETJMP
14014 VBOXSTRICTRC rcStrict;
14015 jmp_buf JmpBuf;
14016 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14017 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14018 if ((rcStrict = setjmp(JmpBuf)) == 0)
14019 {
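        /* Fetch the first opcode byte and dispatch through the one-byte opcode table; the
           selected handler decodes the remainder of the instruction and executes it. */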
14020 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14021 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14022 }
14023 else
14024 pVCpu->iem.s.cLongJumps++;
14025 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14026#else
14027 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14028 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14029#endif
14030 if (rcStrict == VINF_SUCCESS)
14031 pVCpu->iem.s.cInstructions++;
14032 if (pVCpu->iem.s.cActiveMappings > 0)
14033 {
14034 Assert(rcStrict != VINF_SUCCESS);
14035 iemMemRollback(pVCpu);
14036 }
14037 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14038 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14039 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14040
14041//#ifdef DEBUG
14042// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14043//#endif
14044
14045#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14046 /*
14047 * Perform any VMX nested-guest instruction boundary actions.
14048 *
14049 * If any of these causes a VM-exit, we must skip executing the next
14050 * instruction (would run into stale page tables). A VM-exit makes sure
14051 * there is no interrupt-inhibition, so that should ensure we don't go on
14052 * to execute the next instruction. Clearing fExecuteInhibit is
14053 * problematic because of the setjmp/longjmp clobbering above.
14054 */
14055 if ( rcStrict == VINF_SUCCESS
14056 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14057 {
14058 /* TPR-below threshold/APIC write has the highest priority. */
14059 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14060 {
14061 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14062 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14064 }
14065 /* MTF takes priority over VMX-preemption timer. */
14066 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14067 {
14068 rcStrict = iemVmxVmexitMtf(pVCpu);
14069 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14070 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14071 }
14072 /* VMX preemption timer takes priority over NMI-window exits. */
14073 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14074 {
14075 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14076 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14077 rcStrict = VINF_SUCCESS;
14078 else
14079 {
14080 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14081 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14082 }
14083 }
14084 /* NMI-window VM-exit. */
14085 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))
14086 {
14087 rcStrict = iemVmxVmexitNmiWindow(pVCpu);
14088 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14089 }
14090 }
14091#endif
14092
14093 /* Execute the next instruction as well if a cli, pop ss or
14094 mov ss, Gr has just completed successfully. */
14095 if ( fExecuteInhibit
14096 && rcStrict == VINF_SUCCESS
14097 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14098 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14099 {
14100 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14101 if (rcStrict == VINF_SUCCESS)
14102 {
14103#ifdef LOG_ENABLED
14104 iemLogCurInstr(pVCpu, false, pszFunction);
14105#endif
14106#ifdef IEM_WITH_SETJMP
14107 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14108 if ((rcStrict = setjmp(JmpBuf)) == 0)
14109 {
14110 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14111 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14112 }
14113 else
14114 pVCpu->iem.s.cLongJumps++;
14115 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14116#else
14117 IEM_OPCODE_GET_NEXT_U8(&b);
14118 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14119#endif
14120 if (rcStrict == VINF_SUCCESS)
14121 pVCpu->iem.s.cInstructions++;
14122 if (pVCpu->iem.s.cActiveMappings > 0)
14123 {
14124 Assert(rcStrict != VINF_SUCCESS);
14125 iemMemRollback(pVCpu);
14126 }
14127 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14128 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14129 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14130 }
14131 else if (pVCpu->iem.s.cActiveMappings > 0)
14132 iemMemRollback(pVCpu);
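        /* Clear the interrupt inhibition by pointing the inhibit PC at an address the guest
           RIP can never match. */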
14133 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14134 }
14135
14136 /*
14137 * Return value fiddling, statistics and sanity assertions.
14138 */
14139 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14140
14141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14143 return rcStrict;
14144}
14145
14146
14147#ifdef IN_RC
14148/**
14149 * Re-enters raw-mode or ensures we return to ring-3.
14150 *
14151 * @returns rcStrict, maybe modified.
14152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14153 * @param rcStrict The status code returned by the interpreter.
14154 */
14155DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14156{
14157 if ( !pVCpu->iem.s.fInPatchCode
14158 && ( rcStrict == VINF_SUCCESS
14159 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14160 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14161 {
14162 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14163 CPUMRawEnter(pVCpu);
14164 else
14165 {
14166 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14167 rcStrict = VINF_EM_RESCHEDULE;
14168 }
14169 }
14170 return rcStrict;
14171}
14172#endif
14173
14174
14175/**
14176 * Execute one instruction.
14177 *
14178 * @return Strict VBox status code.
14179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14180 */
14181VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14182{
14183#ifdef LOG_ENABLED
14184 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14185#endif
14186
14187 /*
14188 * Do the decoding and emulation.
14189 */
14190 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14191 if (rcStrict == VINF_SUCCESS)
14192 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14193 else if (pVCpu->iem.s.cActiveMappings > 0)
14194 iemMemRollback(pVCpu);
14195
14196#ifdef IN_RC
14197 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14198#endif
14199 if (rcStrict != VINF_SUCCESS)
14200 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14201 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14202 return rcStrict;
14203}
14204
14205
14206VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14207{
14208 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14209
14210 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14211 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14212 if (rcStrict == VINF_SUCCESS)
14213 {
14214 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14215 if (pcbWritten)
14216 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14217 }
14218 else if (pVCpu->iem.s.cActiveMappings > 0)
14219 iemMemRollback(pVCpu);
14220
14221#ifdef IN_RC
14222 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14223#endif
14224 return rcStrict;
14225}
14226
14227
14228VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14229 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14230{
14231 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14232
14233 VBOXSTRICTRC rcStrict;
14234 if ( cbOpcodeBytes
14235 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14236 {
14237 iemInitDecoder(pVCpu, false);
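        /* Hand the decoder the caller's prefetched opcode bytes directly, so no guest memory
           access is needed as long as decoding stays within this buffer. */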
14238#ifdef IEM_WITH_CODE_TLB
14239 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14240 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14241 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14242 pVCpu->iem.s.offCurInstrStart = 0;
14243 pVCpu->iem.s.offInstrNextByte = 0;
14244#else
14245 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14246 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14247#endif
14248 rcStrict = VINF_SUCCESS;
14249 }
14250 else
14251 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14252 if (rcStrict == VINF_SUCCESS)
14253 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14254 else if (pVCpu->iem.s.cActiveMappings > 0)
14255 iemMemRollback(pVCpu);
14256
14257#ifdef IN_RC
14258 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14259#endif
14260 return rcStrict;
14261}
14262
14263
14264VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14265{
14266 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14267
14268 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14269 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14270 if (rcStrict == VINF_SUCCESS)
14271 {
14272 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14273 if (pcbWritten)
14274 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14275 }
14276 else if (pVCpu->iem.s.cActiveMappings > 0)
14277 iemMemRollback(pVCpu);
14278
14279#ifdef IN_RC
14280 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14281#endif
14282 return rcStrict;
14283}
14284
14285
14286VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14287 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14288{
14289 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14290
14291 VBOXSTRICTRC rcStrict;
14292 if ( cbOpcodeBytes
14293 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14294 {
14295 iemInitDecoder(pVCpu, true);
14296#ifdef IEM_WITH_CODE_TLB
14297 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14298 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14299 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14300 pVCpu->iem.s.offCurInstrStart = 0;
14301 pVCpu->iem.s.offInstrNextByte = 0;
14302#else
14303 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14304 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14305#endif
14306 rcStrict = VINF_SUCCESS;
14307 }
14308 else
14309 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14310 if (rcStrict == VINF_SUCCESS)
14311 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14312 else if (pVCpu->iem.s.cActiveMappings > 0)
14313 iemMemRollback(pVCpu);
14314
14315#ifdef IN_RC
14316 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14317#endif
14318 return rcStrict;
14319}
14320
14321
14322/**
14323 * May come in handy when debugging DISGetParamSize.
14324 *
14325 * @returns Strict VBox status code.
14326 * @param pVCpu The cross context virtual CPU structure of the
14327 * calling EMT.
14328 * @param pCtxCore The context core structure.
14329 * @param OpcodeBytesPC The PC of the opcode bytes.
14330 * @param pvOpcodeBytes Prefetched opcode bytes.
14331 * @param cbOpcodeBytes Number of prefetched bytes.
14332 * @param pcbWritten Where to return the number of bytes written.
14333 * Optional.
14334 */
14335VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14336 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14337 uint32_t *pcbWritten)
14338{
14339 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14340
14341 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14342 VBOXSTRICTRC rcStrict;
14343 if ( cbOpcodeBytes
14344 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14345 {
14346 iemInitDecoder(pVCpu, true);
14347#ifdef IEM_WITH_CODE_TLB
14348 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14349 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14350 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14351 pVCpu->iem.s.offCurInstrStart = 0;
14352 pVCpu->iem.s.offInstrNextByte = 0;
14353#else
14354 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14355 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14356#endif
14357 rcStrict = VINF_SUCCESS;
14358 }
14359 else
14360 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14361 if (rcStrict == VINF_SUCCESS)
14362 {
14363 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14364 if (pcbWritten)
14365 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14366 }
14367 else if (pVCpu->iem.s.cActiveMappings > 0)
14368 iemMemRollback(pVCpu);
14369
14370#ifdef IN_RC
14371 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14372#endif
14373 return rcStrict;
14374}
14375
14376
14377VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14378{
14379 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14380 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14381
14382 /*
14383 * See if there is an interrupt pending in TRPM, inject it if we can.
14384 */
14385 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14386#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
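    /* With nested hw.virt, whether a physical interrupt can be delivered depends on the global
       interrupt flag (GIF) and the nested guest's interrupt controls, not just EFLAGS.IF. */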
14387 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14388 if (fIntrEnabled)
14389 {
14390 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14391 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14392 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14393 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14394 else
14395 {
14396 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14397 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14398 }
14399 }
14400#else
14401 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14402#endif
14403 if ( fIntrEnabled
14404 && TRPMHasTrap(pVCpu)
14405 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14406 {
14407 uint8_t u8TrapNo;
14408 TRPMEVENT enmType;
14409 RTGCUINT uErrCode;
14410 RTGCPTR uCr2;
14411 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14412 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14413 TRPMResetTrap(pVCpu);
14414#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14415 /* Injecting an event may cause a VM-exit. */
14416 if ( rcStrict != VINF_SUCCESS
14417 && rcStrict != VINF_IEM_RAISED_XCPT)
14418 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14419#else
14420 NOREF(rcStrict);
14421#endif
14422 }
14423
14424 /*
14425 * Initial decoder init w/ prefetch, then setup setjmp.
14426 */
14427 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14428 if (rcStrict == VINF_SUCCESS)
14429 {
14430#ifdef IEM_WITH_SETJMP
14431 jmp_buf JmpBuf;
14432 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14433 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14434 pVCpu->iem.s.cActiveMappings = 0;
14435 if ((rcStrict = setjmp(JmpBuf)) == 0)
14436#endif
14437 {
14438 /*
14439 * The run loop, bounded by the caller-specified maximum instruction count.
14440 */
14441 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14442 PVM pVM = pVCpu->CTX_SUFF(pVM);
14443 for (;;)
14444 {
14445 /*
14446 * Log the state.
14447 */
14448#ifdef LOG_ENABLED
14449 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14450#endif
14451
14452 /*
14453 * Do the decoding and emulation.
14454 */
14455 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14456 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14457 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14458 {
14459 Assert(pVCpu->iem.s.cActiveMappings == 0);
14460 pVCpu->iem.s.cInstructions++;
14461 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14462 {
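                        /* Mask out the force-flags IEM can keep running with; we only leave the
                           inner loop when one of the remaining flags needs servicing elsewhere
                           or a pending interrupt has become deliverable. */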
14463 uint64_t fCpu = pVCpu->fLocalForcedActions
14464 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14465 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14466 | VMCPU_FF_TLB_FLUSH
14467#ifdef VBOX_WITH_RAW_MODE
14468 | VMCPU_FF_TRPM_SYNC_IDT
14469 | VMCPU_FF_SELM_SYNC_TSS
14470 | VMCPU_FF_SELM_SYNC_GDT
14471 | VMCPU_FF_SELM_SYNC_LDT
14472#endif
14473 | VMCPU_FF_INHIBIT_INTERRUPTS
14474 | VMCPU_FF_BLOCK_NMIS
14475 | VMCPU_FF_UNHALT ));
14476
14477 if (RT_LIKELY( ( !fCpu
14478 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14479 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14480 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14481 {
14482 if (cMaxInstructionsGccStupidity-- > 0)
14483 {
14484 /* Poll timers every now and then according to the caller's specs. */
14485 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14486 || !TMTimerPollBool(pVM, pVCpu))
14487 {
14488 Assert(pVCpu->iem.s.cActiveMappings == 0);
14489 iemReInitDecoder(pVCpu);
14490 continue;
14491 }
14492 }
14493 }
14494 }
14495 Assert(pVCpu->iem.s.cActiveMappings == 0);
14496 }
14497 else if (pVCpu->iem.s.cActiveMappings > 0)
14498 iemMemRollback(pVCpu);
14499 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14500 break;
14501 }
14502 }
14503#ifdef IEM_WITH_SETJMP
14504 else
14505 {
14506 if (pVCpu->iem.s.cActiveMappings > 0)
14507 iemMemRollback(pVCpu);
14508# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14509 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14510# endif
14511 pVCpu->iem.s.cLongJumps++;
14512 }
14513 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14514#endif
14515
14516 /*
14517 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14518 */
14519 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14520 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14521 }
14522 else
14523 {
14524 if (pVCpu->iem.s.cActiveMappings > 0)
14525 iemMemRollback(pVCpu);
14526
14527#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14528 /*
14529 * When a nested-guest triggers an exception intercept (e.g. #PF) while fetching
14530 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14531 */
14532 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14533#endif
14534 }
14535
14536 /*
14537 * Maybe re-enter raw-mode and log.
14538 */
14539#ifdef IN_RC
14540 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14541#endif
14542 if (rcStrict != VINF_SUCCESS)
14543 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14544 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14545 if (pcInstructions)
14546 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14547 return rcStrict;
14548}
14549
14550
14551/**
14552 * Interface used by EMExecuteExec, does exit statistics and limits.
14553 *
14554 * @returns Strict VBox status code.
14555 * @param pVCpu The cross context virtual CPU structure.
14556 * @param fWillExit To be defined.
14557 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14558 * @param cMaxInstructions Maximum number of instructions to execute.
14559 * @param cMaxInstructionsWithoutExits
14560 * The max number of instructions without exits.
14561 * @param pStats Where to return statistics.
14562 */
14563VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14564 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14565{
14566 NOREF(fWillExit); /** @todo define flexible exit crits */
14567
14568 /*
14569 * Initialize return stats.
14570 */
14571 pStats->cInstructions = 0;
14572 pStats->cExits = 0;
14573 pStats->cMaxExitDistance = 0;
14574 pStats->cReserved = 0;
14575
14576 /*
14577 * Initial decoder init w/ prefetch, then setup setjmp.
14578 */
14579 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14580 if (rcStrict == VINF_SUCCESS)
14581 {
14582#ifdef IEM_WITH_SETJMP
14583 jmp_buf JmpBuf;
14584 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14585 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14586 pVCpu->iem.s.cActiveMappings = 0;
14587 if ((rcStrict = setjmp(JmpBuf)) == 0)
14588#endif
14589 {
14590#ifdef IN_RING0
14591 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14592#endif
14593 uint32_t cInstructionSinceLastExit = 0;
14594
14595 /*
14596 * The run loop, bounded by the caller-specified instruction and exit limits.
14597 */
14598 PVM pVM = pVCpu->CTX_SUFF(pVM);
14599 for (;;)
14600 {
14601 /*
14602 * Log the state.
14603 */
14604#ifdef LOG_ENABLED
14605 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14606#endif
14607
14608 /*
14609 * Do the decoding and emulation.
14610 */
14611 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14612
14613 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14614 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14615
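                    /* Instructions that bumped cPotentialExits count as exits (except the very
                       first); track the distance between them for the caller's statistics. */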
14616 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14617 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14618 {
14619 pStats->cExits += 1;
14620 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14621 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14622 cInstructionSinceLastExit = 0;
14623 }
14624
14625 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14626 {
14627 Assert(pVCpu->iem.s.cActiveMappings == 0);
14628 pVCpu->iem.s.cInstructions++;
14629 pStats->cInstructions++;
14630 cInstructionSinceLastExit++;
14631 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14632 {
14633 uint64_t fCpu = pVCpu->fLocalForcedActions
14634 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14635 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14636 | VMCPU_FF_TLB_FLUSH
14637#ifdef VBOX_WITH_RAW_MODE
14638 | VMCPU_FF_TRPM_SYNC_IDT
14639 | VMCPU_FF_SELM_SYNC_TSS
14640 | VMCPU_FF_SELM_SYNC_GDT
14641 | VMCPU_FF_SELM_SYNC_LDT
14642#endif
14643 | VMCPU_FF_INHIBIT_INTERRUPTS
14644 | VMCPU_FF_BLOCK_NMIS
14645 | VMCPU_FF_UNHALT ));
14646
14647 if (RT_LIKELY( ( ( !fCpu
14648 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14649 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14650 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14651 || pStats->cInstructions < cMinInstructions))
14652 {
14653 if (pStats->cInstructions < cMaxInstructions)
14654 {
14655 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14656 {
14657#ifdef IN_RING0
14658 if ( !fCheckPreemptionPending
14659 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14660#endif
14661 {
14662 Assert(pVCpu->iem.s.cActiveMappings == 0);
14663 iemReInitDecoder(pVCpu);
14664 continue;
14665 }
14666#ifdef IN_RING0
14667 rcStrict = VINF_EM_RAW_INTERRUPT;
14668 break;
14669#endif
14670 }
14671 }
14672 }
14673 Assert(!(fCpu & VMCPU_FF_IEM));
14674 }
14675 Assert(pVCpu->iem.s.cActiveMappings == 0);
14676 }
14677 else if (pVCpu->iem.s.cActiveMappings > 0)
14678 iemMemRollback(pVCpu);
14679 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14680 break;
14681 }
14682 }
14683#ifdef IEM_WITH_SETJMP
14684 else
14685 {
14686 if (pVCpu->iem.s.cActiveMappings > 0)
14687 iemMemRollback(pVCpu);
14688 pVCpu->iem.s.cLongJumps++;
14689 }
14690 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14691#endif
14692
14693 /*
14694 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14695 */
14696 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14697 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14698 }
14699 else
14700 {
14701 if (pVCpu->iem.s.cActiveMappings > 0)
14702 iemMemRollback(pVCpu);
14703
14704#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14705 /*
         * When a nested-guest triggers an exception intercept (e.g. #PF) while fetching
         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14708 */
14709 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14710#endif
14711 }
14712
14713 /*
14714 * Maybe re-enter raw-mode and log.
14715 */
14716#ifdef IN_RC
14717 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14718#endif
14719 if (rcStrict != VINF_SUCCESS)
14720 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14721 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14722 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14723 return rcStrict;
14724}
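
/*
 * Illustrative usage sketch (comment only, not compiled): how a caller such as
 * EM might drive IEMExecForExits and use the returned statistics to decide
 * whether staying in IEM is still worthwhile.  The instruction limits, the
 * thresholds and the fPreferHwAccel flag are made-up example values; only the
 * function signature and the IEMEXECFOREXITSTATS fields come from this file,
 * and fWillExit is passed as 0 since it is currently ignored (see the todo
 * above).
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 32, 2048, 512, &Stats);
 *      bool fPreferHwAccel = false;
 *      if (   rcStrict == VINF_SUCCESS
 *          && Stats.cExits <= 1
 *          && Stats.cMaxExitDistance > 256)
 *          fPreferHwAccel = true;
 */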
14725
14726
14727/**
14728 * Injects a trap, fault, abort, software interrupt or external interrupt.
14729 *
14730 * The parameter list matches TRPMQueryTrapAll pretty closely.
14731 *
14732 * @returns Strict VBox status code.
14733 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14734 * @param u8TrapNo The trap number.
14735 * @param enmType What type is it (trap/fault/abort), software
14736 * interrupt or hardware interrupt.
14737 * @param uErrCode The error code if applicable.
14738 * @param uCr2 The CR2 value if applicable.
14739 * @param cbInstr The instruction length (only relevant for
14740 * software interrupts).
14741 */
14742VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14743 uint8_t cbInstr)
14744{
14745 iemInitDecoder(pVCpu, false);
14746#ifdef DBGFTRACE_ENABLED
14747 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14748 u8TrapNo, enmType, uErrCode, uCr2);
14749#endif
14750
14751 uint32_t fFlags;
14752 switch (enmType)
14753 {
14754 case TRPM_HARDWARE_INT:
14755 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14756 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14757 uErrCode = uCr2 = 0;
14758 break;
14759
14760 case TRPM_SOFTWARE_INT:
14761 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14762 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14763 uErrCode = uCr2 = 0;
14764 break;
14765
14766 case TRPM_TRAP:
14767 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14768 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14769 if (u8TrapNo == X86_XCPT_PF)
14770 fFlags |= IEM_XCPT_FLAGS_CR2;
14771 switch (u8TrapNo)
14772 {
14773 case X86_XCPT_DF:
14774 case X86_XCPT_TS:
14775 case X86_XCPT_NP:
14776 case X86_XCPT_SS:
14777 case X86_XCPT_PF:
14778 case X86_XCPT_AC:
14779 fFlags |= IEM_XCPT_FLAGS_ERR;
14780 break;
14781 }
14782 break;
14783
14784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14785 }
14786
14787 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14788
14789 if (pVCpu->iem.s.cActiveMappings > 0)
14790 iemMemRollback(pVCpu);
14791
14792 return rcStrict;
14793}
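
/*
 * Illustrative usage sketches (comment only, not compiled).  The first call
 * injects a #PF with an error code and fault address, the second an external
 * hardware interrupt on vector 0x20; uErrCd and GCPtrFault are hypothetical
 * variables.  Per the documentation above cbInstr only matters for software
 * interrupts, so 0 is passed in both cases.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            uErrCd, GCPtrFault, 0);
 *      rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
 */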
14794
14795
14796/**
14797 * Injects the active TRPM event.
14798 *
14799 * @returns Strict VBox status code.
14800 * @param pVCpu The cross context virtual CPU structure.
14801 */
14802VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14803{
14804#ifndef IEM_IMPLEMENTS_TASKSWITCH
14805 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14806#else
14807 uint8_t u8TrapNo;
14808 TRPMEVENT enmType;
14809 RTGCUINT uErrCode;
14810 RTGCUINTPTR uCr2;
14811 uint8_t cbInstr;
14812 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14813 if (RT_FAILURE(rc))
14814 return rc;
14815
14816 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14817#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14818 if (rcStrict == VINF_SVM_VMEXIT)
14819 rcStrict = VINF_SUCCESS;
14820#endif
14821#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14822 if (rcStrict == VINF_VMX_VMEXIT)
14823 rcStrict = VINF_SUCCESS;
14824#endif
14825 /** @todo Are there any other codes that imply the event was successfully
14826 * delivered to the guest? See @bugref{6607}. */
14827 if ( rcStrict == VINF_SUCCESS
14828 || rcStrict == VINF_IEM_RAISED_XCPT)
14829 TRPMResetTrap(pVCpu);
14830
14831 return rcStrict;
14832#endif
14833}
14834
14835
14836VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14837{
14838 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14839 return VERR_NOT_IMPLEMENTED;
14840}
14841
14842
14843VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14844{
14845 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14846 return VERR_NOT_IMPLEMENTED;
14847}
14848
14849
14850#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14851/**
14852 * Executes a IRET instruction with default operand size.
14853 *
14854 * This is for PATM.
14855 *
14856 * @returns VBox status code.
14857 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14858 * @param pCtxCore The register frame.
14859 */
14860VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14861{
14862 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14863
14864 iemCtxCoreToCtx(pCtx, pCtxCore);
14865 iemInitDecoder(pVCpu);
14866 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14867 if (rcStrict == VINF_SUCCESS)
14868 iemCtxToCtxCore(pCtxCore, pCtx);
14869 else
14870 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14871 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14872 return rcStrict;
14873}
14874#endif
14875
14876
14877/**
 * Macro used by the IEMExec* methods to check the given instruction length.
14879 *
14880 * Will return on failure!
14881 *
14882 * @param a_cbInstr The given instruction length.
14883 * @param a_cbMin The minimum length.
14884 */
14885#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14886 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14887 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
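/*
 * Note: the single unsigned comparison above checks both bounds at once, i.e.
 * that a_cbMin <= a_cbInstr <= 15 (the architectural maximum instruction
 * length).  If a_cbInstr < a_cbMin the subtraction wraps around to a huge
 * value and the check fails; e.g. with a_cbMin=2, a_cbInstr=1 yields
 * 0xffffffff > 13, while a_cbInstr=16 yields 14 > 13, so both are rejected.
 */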
14888
14889
14890/**
14891 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14892 *
14893 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14894 *
14895 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14897 * @param rcStrict The status code to fiddle.
14898 */
14899DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14900{
14901 iemUninitExec(pVCpu);
14902#ifdef IN_RC
14903 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14904#else
14905 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14906#endif
14907}
14908
14909
14910/**
14911 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14912 *
14913 * This API ASSUMES that the caller has already verified that the guest code is
14914 * allowed to access the I/O port. (The I/O port is in the DX register in the
14915 * guest state.)
14916 *
14917 * @returns Strict VBox status code.
14918 * @param pVCpu The cross context virtual CPU structure.
14919 * @param cbValue The size of the I/O port access (1, 2, or 4).
14920 * @param enmAddrMode The addressing mode.
14921 * @param fRepPrefix Indicates whether a repeat prefix is used
14922 * (doesn't matter which for this instruction).
14923 * @param cbInstr The instruction length in bytes.
 * @param iEffSeg The effective segment register (X86_SREG_XXX).
14925 * @param fIoChecked Whether the access to the I/O port has been
14926 * checked or not. It's typically checked in the
14927 * HM scenario.
14928 */
14929VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14930 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14931{
14932 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14933 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14934
14935 /*
14936 * State init.
14937 */
14938 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14939
14940 /*
14941 * Switch orgy for getting to the right handler.
14942 */
14943 VBOXSTRICTRC rcStrict;
14944 if (fRepPrefix)
14945 {
14946 switch (enmAddrMode)
14947 {
14948 case IEMMODE_16BIT:
14949 switch (cbValue)
14950 {
14951 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14952 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14953 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14954 default:
14955 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14956 }
14957 break;
14958
14959 case IEMMODE_32BIT:
14960 switch (cbValue)
14961 {
14962 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14963 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14964 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14965 default:
14966 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14967 }
14968 break;
14969
14970 case IEMMODE_64BIT:
14971 switch (cbValue)
14972 {
14973 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14975 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14976 default:
14977 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14978 }
14979 break;
14980
14981 default:
14982 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14983 }
14984 }
14985 else
14986 {
14987 switch (enmAddrMode)
14988 {
14989 case IEMMODE_16BIT:
14990 switch (cbValue)
14991 {
14992 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14993 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14994 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14995 default:
14996 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14997 }
14998 break;
14999
15000 case IEMMODE_32BIT:
15001 switch (cbValue)
15002 {
15003 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15004 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15005 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15006 default:
15007 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15008 }
15009 break;
15010
15011 case IEMMODE_64BIT:
15012 switch (cbValue)
15013 {
15014 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15015 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15016 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15017 default:
15018 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15019 }
15020 break;
15021
15022 default:
15023 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15024 }
15025 }
15026
15027 if (pVCpu->iem.s.cActiveMappings)
15028 iemMemRollback(pVCpu);
15029
15030 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15031}
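
/*
 * Illustrative usage sketch (comment only, not compiled): forwarding an
 * intercepted "rep outsb" with 16-bit addressing to IEM after the caller has
 * already validated the I/O port access.  Here 1 is the access size in bytes,
 * true means a REP prefix is present, X86_SREG_DS is the effective segment,
 * and the final true is fIoChecked; cbInstr is a hypothetical value taken
 * from the hardware-provided exit information.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_16BIT,
 *                                                   true, cbInstr, X86_SREG_DS, true);
 */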
15032
15033
15034/**
15035 * Interface for HM and EM for executing string I/O IN (read) instructions.
15036 *
15037 * This API ASSUMES that the caller has already verified that the guest code is
15038 * allowed to access the I/O port. (The I/O port is in the DX register in the
15039 * guest state.)
15040 *
15041 * @returns Strict VBox status code.
15042 * @param pVCpu The cross context virtual CPU structure.
15043 * @param cbValue The size of the I/O port access (1, 2, or 4).
15044 * @param enmAddrMode The addressing mode.
15045 * @param fRepPrefix Indicates whether a repeat prefix is used
15046 * (doesn't matter which for this instruction).
15047 * @param cbInstr The instruction length in bytes.
15048 * @param fIoChecked Whether the access to the I/O port has been
15049 * checked or not. It's typically checked in the
15050 * HM scenario.
15051 */
15052VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15053 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15054{
15055 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15056
15057 /*
15058 * State init.
15059 */
15060 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15061
15062 /*
15063 * Switch orgy for getting to the right handler.
15064 */
15065 VBOXSTRICTRC rcStrict;
15066 if (fRepPrefix)
15067 {
15068 switch (enmAddrMode)
15069 {
15070 case IEMMODE_16BIT:
15071 switch (cbValue)
15072 {
15073 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15074 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15075 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15076 default:
15077 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15078 }
15079 break;
15080
15081 case IEMMODE_32BIT:
15082 switch (cbValue)
15083 {
15084 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15085 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15086 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15087 default:
15088 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15089 }
15090 break;
15091
15092 case IEMMODE_64BIT:
15093 switch (cbValue)
15094 {
15095 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15096 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15097 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15098 default:
15099 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15100 }
15101 break;
15102
15103 default:
15104 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15105 }
15106 }
15107 else
15108 {
15109 switch (enmAddrMode)
15110 {
15111 case IEMMODE_16BIT:
15112 switch (cbValue)
15113 {
15114 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15115 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15116 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15117 default:
15118 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15119 }
15120 break;
15121
15122 case IEMMODE_32BIT:
15123 switch (cbValue)
15124 {
15125 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15126 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15127 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15128 default:
15129 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15130 }
15131 break;
15132
15133 case IEMMODE_64BIT:
15134 switch (cbValue)
15135 {
15136 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15137 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15138 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15139 default:
15140 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15141 }
15142 break;
15143
15144 default:
15145 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15146 }
15147 }
15148
15149 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15151}
15152
15153
15154/**
 * Interface for raw-mode to execute an OUT (port write) instruction.
15156 *
15157 * @returns Strict VBox status code.
15158 * @param pVCpu The cross context virtual CPU structure.
15159 * @param cbInstr The instruction length in bytes.
 * @param u16Port The port to write to.
15161 * @param fImm Whether the port is specified using an immediate operand or
15162 * using the implicit DX register.
15163 * @param cbReg The register size.
15164 *
15165 * @remarks In ring-0 not all of the state needs to be synced in.
15166 */
15167VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15168{
15169 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15170 Assert(cbReg <= 4 && cbReg != 3);
15171
15172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15174 Assert(!pVCpu->iem.s.cActiveMappings);
15175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15176}
15177
15178
15179/**
 * Interface for raw-mode to execute an IN (port read) instruction.
15181 *
15182 * @returns Strict VBox status code.
15183 * @param pVCpu The cross context virtual CPU structure.
15184 * @param cbInstr The instruction length in bytes.
15185 * @param u16Port The port to read.
15186 * @param fImm Whether the port is specified using an immediate operand or
15187 * using the implicit DX.
15188 * @param cbReg The register size.
15189 */
15190VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15191{
15192 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15193 Assert(cbReg <= 4 && cbReg != 3);
15194
15195 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15196 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15197 Assert(!pVCpu->iem.s.cActiveMappings);
15198 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15199}
15200
15201
15202/**
15203 * Interface for HM and EM to write to a CRx register.
15204 *
15205 * @returns Strict VBox status code.
15206 * @param pVCpu The cross context virtual CPU structure.
15207 * @param cbInstr The instruction length in bytes.
15208 * @param iCrReg The control register number (destination).
15209 * @param iGReg The general purpose register number (source).
15210 *
15211 * @remarks In ring-0 not all of the state needs to be synced in.
15212 */
15213VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15214{
15215 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15216 Assert(iCrReg < 16);
15217 Assert(iGReg < 16);
15218
15219 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15220 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15221 Assert(!pVCpu->iem.s.cActiveMappings);
15222 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15223}
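
/*
 * Illustrative usage sketch (comment only, not compiled): emulating an
 * intercepted "mov cr3, rax".  The instruction (0F 22 D8) is 3 bytes long,
 * the destination control register is CR3 (iCrReg=3) and the source GPR is
 * RAX; a real caller would take the instruction length from the exit info.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 3, X86_GREG_xAX);
 */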
15224
15225
15226/**
15227 * Interface for HM and EM to read from a CRx register.
15228 *
15229 * @returns Strict VBox status code.
15230 * @param pVCpu The cross context virtual CPU structure.
15231 * @param cbInstr The instruction length in bytes.
15232 * @param iGReg The general purpose register number (destination).
15233 * @param iCrReg The control register number (source).
15234 *
15235 * @remarks In ring-0 not all of the state needs to be synced in.
15236 */
15237VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15238{
15239 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15240 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15241 | CPUMCTX_EXTRN_APIC_TPR);
15242 Assert(iCrReg < 16);
15243 Assert(iGReg < 16);
15244
15245 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15246 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15247 Assert(!pVCpu->iem.s.cActiveMappings);
15248 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15249}
15250
15251
15252/**
15253 * Interface for HM and EM to clear the CR0[TS] bit.
15254 *
15255 * @returns Strict VBox status code.
15256 * @param pVCpu The cross context virtual CPU structure.
15257 * @param cbInstr The instruction length in bytes.
15258 *
15259 * @remarks In ring-0 not all of the state needs to be synced in.
15260 */
15261VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15262{
15263 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15264
15265 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15266 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15267 Assert(!pVCpu->iem.s.cActiveMappings);
15268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15269}
15270
15271
15272/**
15273 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15274 *
15275 * @returns Strict VBox status code.
15276 * @param pVCpu The cross context virtual CPU structure.
15277 * @param cbInstr The instruction length in bytes.
15278 * @param uValue The value to load into CR0.
15279 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15280 * memory operand. Otherwise pass NIL_RTGCPTR.
15281 *
15282 * @remarks In ring-0 not all of the state needs to be synced in.
15283 */
15284VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15285{
15286 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15287
15288 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15290 Assert(!pVCpu->iem.s.cActiveMappings);
15291 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15292}
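
/*
 * Illustrative usage sketch (comment only, not compiled): the register form of
 * LMSW has no memory operand, so NIL_RTGCPTR is passed for GCPtrEffDst as the
 * documentation above prescribes; uMsw and cbInstr are hypothetical values
 * taken from the exit information.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw, NIL_RTGCPTR);
 */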
15293
15294
15295/**
15296 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15297 *
15298 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15299 *
15300 * @returns Strict VBox status code.
15301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15302 * @param cbInstr The instruction length in bytes.
15303 * @remarks In ring-0 not all of the state needs to be synced in.
15304 * @thread EMT(pVCpu)
15305 */
15306VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15307{
15308 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15309
15310 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15311 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15312 Assert(!pVCpu->iem.s.cActiveMappings);
15313 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15314}
15315
15316
15317/**
15318 * Interface for HM and EM to emulate the WBINVD instruction.
15319 *
15320 * @returns Strict VBox status code.
15321 * @param pVCpu The cross context virtual CPU structure.
15322 * @param cbInstr The instruction length in bytes.
15323 *
15324 * @remarks In ring-0 not all of the state needs to be synced in.
15325 */
15326VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15327{
15328 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15329
15330 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15331 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15332 Assert(!pVCpu->iem.s.cActiveMappings);
15333 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15334}
15335
15336
15337/**
15338 * Interface for HM and EM to emulate the INVD instruction.
15339 *
15340 * @returns Strict VBox status code.
15341 * @param pVCpu The cross context virtual CPU structure.
15342 * @param cbInstr The instruction length in bytes.
15343 *
15344 * @remarks In ring-0 not all of the state needs to be synced in.
15345 */
15346VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15347{
15348 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the INVLPG instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_PGM_SYNC_CR3
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 * @param GCPtrPage The effective address of the page to invalidate.
15366 *
15367 * @remarks In ring-0 not all of the state needs to be synced in.
15368 */
15369VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15370{
15371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15372
15373 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15375 Assert(!pVCpu->iem.s.cActiveMappings);
15376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15377}
15378
15379
15380/**
15381 * Interface for HM and EM to emulate the CPUID instruction.
15382 *
15383 * @returns Strict VBox status code.
15384 *
15385 * @param pVCpu The cross context virtual CPU structure.
15386 * @param cbInstr The instruction length in bytes.
15387 *
 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15389 */
15390VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15391{
15392 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15393 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15394
15395 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15396 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15397 Assert(!pVCpu->iem.s.cActiveMappings);
15398 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15399}
15400
15401
15402/**
15403 * Interface for HM and EM to emulate the RDPMC instruction.
15404 *
15405 * @returns Strict VBox status code.
15406 *
15407 * @param pVCpu The cross context virtual CPU structure.
15408 * @param cbInstr The instruction length in bytes.
15409 *
15410 * @remarks Not all of the state needs to be synced in.
15411 */
15412VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15413{
15414 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15415 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15416
15417 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15418 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15419 Assert(!pVCpu->iem.s.cActiveMappings);
15420 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15421}
15422
15423
15424/**
15425 * Interface for HM and EM to emulate the RDTSC instruction.
15426 *
15427 * @returns Strict VBox status code.
15428 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15429 *
15430 * @param pVCpu The cross context virtual CPU structure.
15431 * @param cbInstr The instruction length in bytes.
15432 *
15433 * @remarks Not all of the state needs to be synced in.
15434 */
15435VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15436{
15437 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15438 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15439
15440 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15441 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15442 Assert(!pVCpu->iem.s.cActiveMappings);
15443 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15444}
15445
15446
15447/**
15448 * Interface for HM and EM to emulate the RDTSCP instruction.
15449 *
15450 * @returns Strict VBox status code.
15451 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15452 *
15453 * @param pVCpu The cross context virtual CPU structure.
15454 * @param cbInstr The instruction length in bytes.
15455 *
15456 * @remarks Not all of the state needs to be synced in. Recommended
 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15458 */
15459VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15460{
15461 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15462 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15463
15464 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15465 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15466 Assert(!pVCpu->iem.s.cActiveMappings);
15467 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15468}
15469
15470
15471/**
15472 * Interface for HM and EM to emulate the RDMSR instruction.
15473 *
15474 * @returns Strict VBox status code.
15475 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15476 *
15477 * @param pVCpu The cross context virtual CPU structure.
15478 * @param cbInstr The instruction length in bytes.
15479 *
15480 * @remarks Not all of the state needs to be synced in. Requires RCX and
15481 * (currently) all MSRs.
15482 */
15483VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15484{
15485 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15486 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15487
15488 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15490 Assert(!pVCpu->iem.s.cActiveMappings);
15491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15492}
15493
15494
15495/**
15496 * Interface for HM and EM to emulate the WRMSR instruction.
15497 *
15498 * @returns Strict VBox status code.
15499 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15500 *
15501 * @param pVCpu The cross context virtual CPU structure.
15502 * @param cbInstr The instruction length in bytes.
15503 *
15504 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15505 * and (currently) all MSRs.
15506 */
15507VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15508{
15509 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15510 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15511 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15512
15513 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15514 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15515 Assert(!pVCpu->iem.s.cActiveMappings);
15516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15517}
15518
15519
15520/**
15521 * Interface for HM and EM to emulate the MONITOR instruction.
15522 *
15523 * @returns Strict VBox status code.
15524 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15525 *
15526 * @param pVCpu The cross context virtual CPU structure.
15527 * @param cbInstr The instruction length in bytes.
15528 *
15529 * @remarks Not all of the state needs to be synced in.
 * @remarks ASSUMES the default segment (DS) and that no segment override
 * prefixes are used.
15532 */
15533VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15534{
15535 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15536 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15537
15538 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15539 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15540 Assert(!pVCpu->iem.s.cActiveMappings);
15541 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15542}
15543
15544
15545/**
15546 * Interface for HM and EM to emulate the MWAIT instruction.
15547 *
15548 * @returns Strict VBox status code.
15549 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15550 *
15551 * @param pVCpu The cross context virtual CPU structure.
15552 * @param cbInstr The instruction length in bytes.
15553 *
15554 * @remarks Not all of the state needs to be synced in.
15555 */
15556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15557{
15558 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15559
15560 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15561 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15562 Assert(!pVCpu->iem.s.cActiveMappings);
15563 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15564}
15565
15566
15567/**
15568 * Interface for HM and EM to emulate the HLT instruction.
15569 *
15570 * @returns Strict VBox status code.
15571 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15572 *
15573 * @param pVCpu The cross context virtual CPU structure.
15574 * @param cbInstr The instruction length in bytes.
15575 *
15576 * @remarks Not all of the state needs to be synced in.
15577 */
15578VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15579{
15580 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15581
15582 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15584 Assert(!pVCpu->iem.s.cActiveMappings);
15585 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15586}
15587
15588
15589/**
15590 * Checks if IEM is in the process of delivering an event (interrupt or
15591 * exception).
15592 *
15593 * @returns true if we're in the process of raising an interrupt or exception,
15594 * false otherwise.
15595 * @param pVCpu The cross context virtual CPU structure.
15596 * @param puVector Where to store the vector associated with the
15597 * currently delivered event, optional.
 * @param pfFlags Where to store the event delivery flags (see
15599 * IEM_XCPT_FLAGS_XXX), optional.
15600 * @param puErr Where to store the error code associated with the
15601 * event, optional.
15602 * @param puCr2 Where to store the CR2 associated with the event,
15603 * optional.
15604 * @remarks The caller should check the flags to determine if the error code and
15605 * CR2 are valid for the event.
15606 */
15607VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15608{
15609 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15610 if (fRaisingXcpt)
15611 {
15612 if (puVector)
15613 *puVector = pVCpu->iem.s.uCurXcpt;
15614 if (pfFlags)
15615 *pfFlags = pVCpu->iem.s.fCurXcpt;
15616 if (puErr)
15617 *puErr = pVCpu->iem.s.uCurXcptErr;
15618 if (puCr2)
15619 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15620 }
15621 return fRaisingXcpt;
15622}
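
/*
 * Illustrative usage sketch (comment only, not compiled): querying whether IEM
 * is in the middle of delivering an event and checking which of the returned
 * fields are valid.  All output parameters are optional; the flags decide
 * whether uErr and uCr2 carry meaningful data.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          bool const fHasErrCd = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fHasCr2   = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          Log(("Delivering vector %#x (errcd %s, cr2 %s)\n", uVector,
 *               fHasErrCd ? "valid" : "n/a", fHasCr2 ? "valid" : "n/a"));
 *      }
 */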
15623
15624#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15625
15626/**
15627 * Interface for HM and EM to emulate the CLGI instruction.
15628 *
15629 * @returns Strict VBox status code.
15630 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15631 * @param cbInstr The instruction length in bytes.
15632 * @thread EMT(pVCpu)
15633 */
15634VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15635{
15636 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15637
15638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15639 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15640 Assert(!pVCpu->iem.s.cActiveMappings);
15641 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15642}
15643
15644
15645/**
15646 * Interface for HM and EM to emulate the STGI instruction.
15647 *
15648 * @returns Strict VBox status code.
15649 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15650 * @param cbInstr The instruction length in bytes.
15651 * @thread EMT(pVCpu)
15652 */
15653VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15654{
15655 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15656
15657 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15658 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15659 Assert(!pVCpu->iem.s.cActiveMappings);
15660 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15661}
15662
15663
15664/**
15665 * Interface for HM and EM to emulate the VMLOAD instruction.
15666 *
15667 * @returns Strict VBox status code.
15668 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15669 * @param cbInstr The instruction length in bytes.
15670 * @thread EMT(pVCpu)
15671 */
15672VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15673{
15674 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15675
15676 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15677 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15678 Assert(!pVCpu->iem.s.cActiveMappings);
15679 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15680}
15681
15682
15683/**
15684 * Interface for HM and EM to emulate the VMSAVE instruction.
15685 *
15686 * @returns Strict VBox status code.
15687 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15688 * @param cbInstr The instruction length in bytes.
15689 * @thread EMT(pVCpu)
15690 */
15691VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15692{
15693 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15694
15695 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15696 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15697 Assert(!pVCpu->iem.s.cActiveMappings);
15698 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15699}
15700
15701
15702/**
15703 * Interface for HM and EM to emulate the INVLPGA instruction.
15704 *
15705 * @returns Strict VBox status code.
15706 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15707 * @param cbInstr The instruction length in bytes.
15708 * @thread EMT(pVCpu)
15709 */
15710VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15711{
15712 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15713
15714 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15715 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15716 Assert(!pVCpu->iem.s.cActiveMappings);
15717 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15718}
15719
15720
15721/**
15722 * Interface for HM and EM to emulate the VMRUN instruction.
15723 *
15724 * @returns Strict VBox status code.
15725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15726 * @param cbInstr The instruction length in bytes.
15727 * @thread EMT(pVCpu)
15728 */
15729VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15730{
15731 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15732 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15733
15734 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15735 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15736 Assert(!pVCpu->iem.s.cActiveMappings);
15737 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15738}
15739
15740
15741/**
15742 * Interface for HM and EM to emulate \#VMEXIT.
15743 *
15744 * @returns Strict VBox status code.
15745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15746 * @param uExitCode The exit code.
15747 * @param uExitInfo1 The exit info. 1 field.
15748 * @param uExitInfo2 The exit info. 2 field.
15749 * @thread EMT(pVCpu)
15750 */
15751VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15752{
15753 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15754 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15755 if (pVCpu->iem.s.cActiveMappings)
15756 iemMemRollback(pVCpu);
15757 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15758}
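
/*
 * Illustrative usage sketch (comment only, not compiled): forcing a nested
 * #VMEXIT for a physical interrupt intercept.  SVM_EXIT_INTR comes from the
 * SVM headers; passing 0 for both exit info fields is an assumption made for
 * the example (check the AMD spec for what each exit code requires).
 *
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
 */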
15759
15760#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15761
15762#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15763
15764/**
15765 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15766 *
15767 * @returns Strict VBox status code.
15768 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15769 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15770 * the x2APIC device.
 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15772 *
15773 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15774 * @param idMsr The MSR being read.
15775 * @param pu64Value Pointer to the value being written or where to store the
15776 * value being read.
15777 * @param fWrite Whether this is an MSR write or read access.
15778 * @thread EMT(pVCpu)
15779 */
15780VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15781{
15782 Assert(pu64Value);
15783
15784 VBOXSTRICTRC rcStrict;
15785 if (!fWrite)
15786 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15787 else
15788 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15789 if (pVCpu->iem.s.cActiveMappings)
15790 iemMemRollback(pVCpu);
15791 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15792
15793}
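
/*
 * Illustrative usage sketch (comment only, not compiled): handling an RDMSR
 * intercept for an x2APIC MSR, with idMsr being a hypothetical variable taken
 * from the exit information.  The three documented return values decide how
 * the caller proceeds.
 *
 *      uint64_t uValue = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, false);
 *      if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
 *          Log(("x2APIC MSR %#x read virtualized: %#RX64\n", idMsr, uValue));
 *      else if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
 *          Log(("MSR %#x must be handled by the x2APIC device\n", idMsr));
 *      else
 *          Log(("Raise #GP(0), rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */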
15794
15795
15796/**
15797 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15798 *
15799 * @returns Strict VBox status code.
15800 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15801 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15802 *
15803 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15804 * @param offAccess The offset of the register being accessed (within the
15805 * APIC-access page).
15806 * @param cbAccess The size of the access in bytes.
15807 * @param pvData Pointer to the data being written or where to store the data
15808 * being read.
15809 * @param fWrite Whether this is a write or read access.
15810 * @thread EMT(pVCpu)
15811 */
15812VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15813 bool fWrite)
15814{
15815 Assert(pvData);
15816
15817 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15818 * accesses, so we only use read/write here. Maybe in the future the PGM
15819 * physical handler will be extended to include this information? */
15820 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15821 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15822 if (pVCpu->iem.s.cActiveMappings)
15823 iemMemRollback(pVCpu);
15824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15825}
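
/*
 * Illustrative usage sketch (comment only, not compiled): virtualizing a
 * 32-bit write to the TPR register, which lives at offset 0x80 of the
 * APIC-access page.  uNewTpr is a hypothetical value and the final true
 * indicates a write access.
 *
 *      uint32_t uNewTpr = 0x20;
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMem(pVCpu, 0x80, sizeof(uNewTpr),
 *                                                          &uNewTpr, true);
 */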
15826
15827
15828/**
15829 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15830 * VM-exit.
15831 *
15832 * @returns Strict VBox status code.
15833 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15834 * @thread EMT(pVCpu)
15835 */
15836VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15837{
15838 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15839 if (pVCpu->iem.s.cActiveMappings)
15840 iemMemRollback(pVCpu);
15841 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15842}
15843
15844
15845/**
15846 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15847 *
15848 * @returns Strict VBox status code.
15849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15850 * @thread EMT(pVCpu)
15851 */
15852VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15853{
15854 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15855 if (pVCpu->iem.s.cActiveMappings)
15856 iemMemRollback(pVCpu);
15857 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15858}
15859
15860
15861/**
15862 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15863 *
15864 * @returns Strict VBox status code.
15865 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15866 * @param uVector The external interrupt vector (pass 0 if the external
15867 * interrupt is still pending).
15868 * @param fIntPending Whether the external interrupt is pending or
 * acknowledged in the interrupt controller.
15870 * @thread EMT(pVCpu)
15871 */
15872VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15873{
15874 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15875 if (pVCpu->iem.s.cActiveMappings)
15876 iemMemRollback(pVCpu);
15877 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15878}
15879
15880
15881/**
15882 * Interface for HM and EM to emulate VM-exit due to NMIs.
15883 *
15884 * @returns Strict VBox status code.
15885 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15886 * @thread EMT(pVCpu)
15887 */
15888VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmi(PVMCPU pVCpu)
15889{
15890 VBOXSTRICTRC rcStrict = iemVmxVmexitNmi(pVCpu);
15891 if (pVCpu->iem.s.cActiveMappings)
15892 iemMemRollback(pVCpu);
15893 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15894}
15895
15896
15897/**
15898 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15899 *
15900 * @returns Strict VBox status code.
15901 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15902 * @param uVector The SIPI vector.
15903 * @thread EMT(pVCpu)
15904 */
15905VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15906{
15907 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15908 if (pVCpu->iem.s.cActiveMappings)
15909 iemMemRollback(pVCpu);
15910 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15911}
15912
15913
15914/**
15915 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15916 *
15917 * @returns Strict VBox status code.
15918 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15919 * @thread EMT(pVCpu)
15920 */
15921VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15922{
15923 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15924 if (pVCpu->iem.s.cActiveMappings)
15925 iemMemRollback(pVCpu);
15926 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15927}
15928
15929
15930/**
15931 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15932 *
15933 * @returns Strict VBox status code.
15934 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15935 * @thread EMT(pVCpu)
15936 */
15937VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15938{
15939 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15940 if (pVCpu->iem.s.cActiveMappings)
15941 iemMemRollback(pVCpu);
15942 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15943}
15944
15945
15946/**
15947 * Interface for HM and EM to emulate VM-exits for NMI-windows.
15948 *
15949 * @returns Strict VBox status code.
15950 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15951 * @thread EMT(pVCpu)
15952 */
15953VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu)
15954{
15955 VBOXSTRICTRC rcStrict = iemVmxVmexitNmiWindow(pVCpu);
15956 if (pVCpu->iem.s.cActiveMappings)
15957 iemMemRollback(pVCpu);
15958 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15959}
15960
15961
15962/**
 * Interface for HM and EM to emulate VM-exits due to the Monitor-Trap Flag (MTF).
15964 *
15965 * @returns Strict VBox status code.
15966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15967 * @thread EMT(pVCpu)
15968 */
15969VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15970{
15971 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15972 if (pVCpu->iem.s.cActiveMappings)
15973 iemMemRollback(pVCpu);
15974 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15975}
15976
15977
15978/**
15979 * Interface for HM and EM to emulate the VMREAD instruction.
15980 *
15981 * @returns Strict VBox status code.
15982 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15983 * @param pExitInfo Pointer to the VM-exit information struct.
15984 * @thread EMT(pVCpu)
15985 */
15986VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15987{
15988 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15989 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15990 Assert(pExitInfo);
15991
15992 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15993
15994 VBOXSTRICTRC rcStrict;
15995 uint8_t const cbInstr = pExitInfo->cbInstr;
15996 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15997 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15998 {
15999 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
16000 {
16001 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16002 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
16003 }
16004 else
16005 {
16006 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16007 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
16008 }
16009 }
16010 else
16011 {
16012 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
16013 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16014 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16015 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
16016 }
16017 Assert(!pVCpu->iem.s.cActiveMappings);
16018 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16019}
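
/*
 * Illustrative sketch (comment only, not compiled) of how a caller might fill
 * in the exit information for the register form of VMREAD before calling the
 * API above.  Only the fields this function actually consumes are set; the
 * field and bit names are taken from the code above, while iGRegDst,
 * iGRegFieldEnc and cbInstr are hypothetical values from the hardware exit.
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.cbInstr                               = cbInstr;
 *      ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 1;
 *      ExitInfo.InstrInfo.VmreadVmwrite.iReg1         = iGRegDst;
 *      ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = iGRegFieldEnc;
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
 */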
16020
16021
16022/**
16023 * Interface for HM and EM to emulate the VMWRITE instruction.
16024 *
16025 * @returns Strict VBox status code.
16026 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16027 * @param pExitInfo Pointer to the VM-exit information struct.
16028 * @thread EMT(pVCpu)
16029 */
16030VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16031{
16032 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16033 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16034 Assert(pExitInfo);
16035
16036 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16037
16038 uint64_t u64Val;
16039 uint8_t iEffSeg;
16040 IEMMODE enmEffAddrMode;
16041 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16042 {
16043 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16044 iEffSeg = UINT8_MAX;
16045 enmEffAddrMode = UINT8_MAX;
16046 }
16047 else
16048 {
16049 u64Val = pExitInfo->GCPtrEffAddr;
16050 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16051 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16052 }
16053 uint8_t const cbInstr = pExitInfo->cbInstr;
16054 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16055 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16056 Assert(!pVCpu->iem.s.cActiveMappings);
16057 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16058}
16059
16060
16061/**
16062 * Interface for HM and EM to emulate the VMPTRLD instruction.
16063 *
16064 * @returns Strict VBox status code.
16065 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16066 * @param pExitInfo Pointer to the VM-exit information struct.
16067 * @thread EMT(pVCpu)
16068 */
16069VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16070{
16071 Assert(pExitInfo);
16072 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16073 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16074
16075 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16076
16077 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16078 uint8_t const cbInstr = pExitInfo->cbInstr;
16079 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16080 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16081 Assert(!pVCpu->iem.s.cActiveMappings);
16082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16083}
16084
16085
16086/**
16087 * Interface for HM and EM to emulate the VMPTRST instruction.
16088 *
16089 * @returns Strict VBox status code.
16090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16091 * @param pExitInfo Pointer to the VM-exit information struct.
16092 * @thread EMT(pVCpu)
16093 */
16094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16095{
16096 Assert(pExitInfo);
16097 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16098 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16099
16100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16101
16102 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16103 uint8_t const cbInstr = pExitInfo->cbInstr;
16104 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16105 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16106 Assert(!pVCpu->iem.s.cActiveMappings);
16107 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16108}
16109
16110
16111/**
16112 * Interface for HM and EM to emulate the VMCLEAR instruction.
16113 *
16114 * @returns Strict VBox status code.
16115 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16116 * @param pExitInfo Pointer to the VM-exit information struct.
16117 * @thread EMT(pVCpu)
16118 */
16119VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16120{
16121 Assert(pExitInfo);
16122 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16123 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16124
16125 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16126
16127 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16128 uint8_t const cbInstr = pExitInfo->cbInstr;
16129 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16130 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16131 Assert(!pVCpu->iem.s.cActiveMappings);
16132 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16133}
16134
16135
16136/**
16137 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16138 *
16139 * @returns Strict VBox status code.
16140 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16141 * @param cbInstr The instruction length in bytes.
16142 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16143 * VMXINSTRID_VMRESUME).
16144 * @thread EMT(pVCpu)
16145 */
16146VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16147{
16148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16150
16151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16152 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16153 Assert(!pVCpu->iem.s.cActiveMappings);
16154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16155}


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information struct.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const   iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const   cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const   GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
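
/*
 * Illustrative sketch (not part of the build): in contrast to the memory-operand
 * instructions above, VMXOFF has no operand, so the caller only needs the
 * instruction length and no VMXVEXITINFO is constructed. The helper name is made
 * up for the example.
 */
#if 0 /* documentation-only sketch */
static VBOXSTRICTRC hmExampleHandleVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedVmxoff(pVCpu, cbInstr);
}
#endif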


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
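
/*
 * Worked example of the address math above (illustrative values, 4 KiB pages): if
 * the nested guest's APIC-access page is at 0xfee00000 and the faulting physical
 * address is 0xfee00080 (the TPR register offset), then:
 *
 *   GCPhysAccessBase = 0xfee00080 & ~(RTGCPHYS)PAGE_OFFSET_MASK = 0xfee00000
 *   offAccess        = 0xfee00080 &           PAGE_OFFSET_MASK  = 0x080
 *
 * so iemVmxVirtApicAccessMem() receives the 0x080 page offset together with the
 * access size, buffer and direction to emulate the virtual-APIC access.
 */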

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
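
/*
 * Illustrative summary of the merge rules above (informational comment only):
 *
 *   rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3     -> the commit status wins
 *   commit status is VINF_SUCCESS                     -> rcStrict wins
 *   both are EM scheduling codes (VINF_EM_FIRST..LAST)-> the numerically lower one,
 *                                                        which EM treats as the
 *                                                        higher-priority request
 *   failures, equal codes or any other mix            -> iemR3MergeStatusSlow()
 */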


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(   (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
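
/*
 * Illustrative sketch (not part of the build): roughly where this entry point sits
 * in the ring-3 flow. After execution returns to ring-3 with a pending bounce-buffer
 * write, the force-flag processing calls IEMR3ProcessForceFlag to perform the
 * deferred PGMPhysWrite()s and merge the resulting status into the status code that
 * is already in flight. The wrapper function name is made up for the example.
 */
#if 0 /* documentation-only sketch */
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif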

#endif /* IN_RING3 */
