VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 79756

Last change on this file since 79756 was 79756, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Double fault and triple fault VM-exit fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 653.7 KB
1/* $Id: IEMAll.cpp 79756 2019-07-13 16:06:20Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
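/* Editor's note: a minimal illustrative sketch of the above level conventions; it is not part
 * of the original file and the variable names are hypothetical.  Note the double parentheses
 * required by the IPRT logging macros:
 *
 *      Log(("iemExample: raising #GP(0) at %04x:%RX64\n", uCs, uRip));        // level 1: major events
 *      LogFlow(("iemExample: enter\n"));                                      // flow: enter/exit info
 *      Log4(("decode - %04x:%RGv: example mnemonic\n", uCs, GCPtrPC));        // level 4: mnemonics w/ EIP
 *      Log8(("iemExample: wrote %#x bytes at %RGv\n", cbValue, GCPtrMem));    // level 8: memory writes
 */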
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
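/* Editor's note: an illustrative sketch of how the decoder function macros above are meant to
 * be used; it is not part of the original file and the function names are hypothetical (real
 * decoder functions are defined elsewhere in IEM):
 *
 *      FNIEMOP_DEF(iemOp_ExampleStub)              // plain one-byte opcode decoder
 *      {
 *          RT_NOREF(pVCpu);
 *          return VERR_IEM_INSTR_NOT_IMPLEMENTED;  // decoders return a strict status code
 *      }
 *
 *      FNIEMOPRM_DEF(iemOp_ExampleStubRm)          // decoder that also receives the ModR/M byte
 *      {
 *          RT_NOREF(pVCpu, bRm);
 *          return VERR_IEM_INSTR_NOT_IMPLEMENTED;
 *      }
 */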
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
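/* Editor's note: illustrative sketch of how the two descriptor views are typically consumed
 * after a fetch; it is not part of the original file and assumes the X86DESC generic field
 * names (u1Present, u2Dpl) from iprt/x86.h:
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)     // present bit sits in the same place in both views
 *          return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
 *      uint8_t const uDpl = Desc.Legacy.Gen.u2Dpl;
 */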
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
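/* Editor's note: a conceptual sketch (not part of the original file) contrasting the two
 * status-handling styles; the *Jmp fetch variant is assumed from the description above:
 *
 *      // Status-code style: every helper returns a VBOXSTRICTRC that must be checked/propagated.
 *      uint32_t u32Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, X86_SREG_DS, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Setjmp style: the caller has armed a jump buffer, so the helper can longjmp out on
 *      // failure and simply return the value on success - no per-call checks required.
 *      uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 */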
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
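/* Editor's note: illustrative usage sketch (not part of the original file; the worker
 * functions are hypothetical):
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: return iemExampleWorkerU16(pVCpu);
 *          case IEMMODE_32BIT: return iemExampleWorkerU32(pVCpu);
 *          case IEMMODE_64BIT: return iemExampleWorkerU64(pVCpu);
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */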
248/**
249 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
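/* Editor's note: illustrative sketch (not part of the original file) of how the decoder
 * dispatches through these call macros; iemOp_ExampleStubRm is the hypothetical decoder from
 * the sketch further up:
 *
 *      uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);              // fetch the opcode byte
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);           // dispatch via the one-byte map
 *
 *      // For a decoder that has already fetched the ModR/M byte:
 *      return FNIEMOP_CALL_1(iemOp_ExampleStubRm, bRm);
 */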
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for a triple fault.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
450 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
451
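/* Editor's note: illustrative sketch (not part of the original file) of how an instruction
 * emulation typically combines these checks; the HLT control and exit-reason constants are
 * assumed from the VMX headers:
 *
 *      if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *          && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
 */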
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles an SVM nested-guest instruction intercept, updating the
526 * NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
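/* Editor's note: illustrative sketch (not part of the original file) of the typical use of the
 * instruction-intercept helper above; the WBINVD intercept and exit-code constants are assumed
 * from the SVM headers:
 *
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD,
 *                                    0, 0);    // no extra exit info for this one
 */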
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
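/* Editor's note: illustrative sketch (not part of the original file) of how the group-1 table
 * is indexed by the reg field of the ModR/M byte to pick the implementation pair:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      // The normal/locked worker for the effective operand size is then taken from pImpl.
 */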
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
909/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
985IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
986IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
987#endif
988
989#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
990IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
991IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
992#endif
993
994
995/**
996 * Sets the pass up status.
997 *
998 * @returns VINF_SUCCESS.
999 * @param pVCpu The cross context virtual CPU structure of the
1000 * calling thread.
1001 * @param rcPassUp The pass up status. Must be informational.
1002 * VINF_SUCCESS is not allowed.
1003 */
1004IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1005{
1006 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1007
1008 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1009 if (rcOldPassUp == VINF_SUCCESS)
1010 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1011 /* If both are EM scheduling codes, use EM priority rules. */
1012 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1013 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1014 {
1015 if (rcPassUp < rcOldPassUp)
1016 {
1017 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1018 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1019 }
1020 else
1021 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 }
1023 /* Override EM scheduling with specific status code. */
1024 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1025 {
1026 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1028 }
1029 /* Don't override specific status code, first come first served. */
1030 else
1031 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1032 return VINF_SUCCESS;
1033}
1034
1035
1036/**
1037 * Calculates the CPU mode.
1038 *
1039 * This is mainly for updating IEMCPU::enmCpuMode.
1040 *
1041 * @returns CPU mode.
1042 * @param pVCpu The cross context virtual CPU structure of the
1043 * calling thread.
1044 */
1045DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1046{
1047 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1048 return IEMMODE_64BIT;
1049 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1050 return IEMMODE_32BIT;
1051 return IEMMODE_16BIT;
1052}
1053
1054
1055/**
1056 * Initializes the execution state.
1057 *
1058 * @param pVCpu The cross context virtual CPU structure of the
1059 * calling thread.
1060 * @param fBypassHandlers Whether to bypass access handlers.
1061 *
1062 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1063 * side-effects in strict builds.
1064 */
1065DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1066{
1067 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1068 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1069
1070#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1079#endif
1080
1081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1082 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1083#endif
1084 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1085 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1086#ifdef VBOX_STRICT
1087 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1088 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1089 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1090 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1091 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1092 pVCpu->iem.s.uRexReg = 127;
1093 pVCpu->iem.s.uRexB = 127;
1094 pVCpu->iem.s.offModRm = 127;
1095 pVCpu->iem.s.uRexIndex = 127;
1096 pVCpu->iem.s.iEffSeg = 127;
1097 pVCpu->iem.s.idxPrefix = 127;
1098 pVCpu->iem.s.uVex3rdReg = 127;
1099 pVCpu->iem.s.uVexLength = 127;
1100 pVCpu->iem.s.fEvexStuff = 127;
1101 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1102# ifdef IEM_WITH_CODE_TLB
1103 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1104 pVCpu->iem.s.pbInstrBuf = NULL;
1105 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1106 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1107 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1108 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1109# else
1110 pVCpu->iem.s.offOpcode = 127;
1111 pVCpu->iem.s.cbOpcode = 127;
1112# endif
1113#endif
1114
1115 pVCpu->iem.s.cActiveMappings = 0;
1116 pVCpu->iem.s.iNextMapping = 0;
1117 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1118 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1119#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1120 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1121 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1122 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1123 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1124 if (!pVCpu->iem.s.fInPatchCode)
1125 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1126#endif
1127}
1128
1129#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1130/**
1131 * Performs a minimal reinitialization of the execution state.
1132 *
1133 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1134 * 'world-switch' type operations on the CPU. Currently only nested
1135 * hardware-virtualization uses it.
1136 *
1137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1138 */
1139IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1140{
1141 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1142 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1143
1144 pVCpu->iem.s.uCpl = uCpl;
1145 pVCpu->iem.s.enmCpuMode = enmMode;
1146 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1147 pVCpu->iem.s.enmEffAddrMode = enmMode;
1148 if (enmMode != IEMMODE_64BIT)
1149 {
1150 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1151 pVCpu->iem.s.enmEffOpSize = enmMode;
1152 }
1153 else
1154 {
1155 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1156 pVCpu->iem.s.enmEffOpSize = enmMode;
1157 }
1158 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1159#ifndef IEM_WITH_CODE_TLB
1160 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1161 pVCpu->iem.s.offOpcode = 0;
1162 pVCpu->iem.s.cbOpcode = 0;
1163#endif
1164 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1165}
1166#endif
1167
1168/**
1169 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1170 *
1171 * @param pVCpu The cross context virtual CPU structure of the
1172 * calling thread.
1173 */
1174DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1175{
1176 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1177#ifdef VBOX_STRICT
1178# ifdef IEM_WITH_CODE_TLB
1179 NOREF(pVCpu);
1180# else
1181 pVCpu->iem.s.cbOpcode = 0;
1182# endif
1183#else
1184 NOREF(pVCpu);
1185#endif
1186}
1187
1188
1189/**
1190 * Initializes the decoder state.
1191 *
1192 * iemReInitDecoder is mostly a copy of this function.
1193 *
1194 * @param pVCpu The cross context virtual CPU structure of the
1195 * calling thread.
1196 * @param fBypassHandlers Whether to bypass access handlers.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202
1203#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1212#endif
1213
1214#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1215 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1216#endif
1217 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1218 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1219 pVCpu->iem.s.enmCpuMode = enmMode;
1220 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1221 pVCpu->iem.s.enmEffAddrMode = enmMode;
1222 if (enmMode != IEMMODE_64BIT)
1223 {
1224 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1225 pVCpu->iem.s.enmEffOpSize = enmMode;
1226 }
1227 else
1228 {
1229 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1230 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1231 }
1232 pVCpu->iem.s.fPrefixes = 0;
1233 pVCpu->iem.s.uRexReg = 0;
1234 pVCpu->iem.s.uRexB = 0;
1235 pVCpu->iem.s.uRexIndex = 0;
1236 pVCpu->iem.s.idxPrefix = 0;
1237 pVCpu->iem.s.uVex3rdReg = 0;
1238 pVCpu->iem.s.uVexLength = 0;
1239 pVCpu->iem.s.fEvexStuff = 0;
1240 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1241#ifdef IEM_WITH_CODE_TLB
1242 pVCpu->iem.s.pbInstrBuf = NULL;
1243 pVCpu->iem.s.offInstrNextByte = 0;
1244 pVCpu->iem.s.offCurInstrStart = 0;
1245# ifdef VBOX_STRICT
1246 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1247 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1248 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1249# endif
1250#else
1251 pVCpu->iem.s.offOpcode = 0;
1252 pVCpu->iem.s.cbOpcode = 0;
1253#endif
1254 pVCpu->iem.s.offModRm = 0;
1255 pVCpu->iem.s.cActiveMappings = 0;
1256 pVCpu->iem.s.iNextMapping = 0;
1257 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1258 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1259#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1260 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1261 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1262 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1263 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1264 if (!pVCpu->iem.s.fInPatchCode)
1265 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1266#endif
1267
1268#ifdef DBGFTRACE_ENABLED
1269 switch (enmMode)
1270 {
1271 case IEMMODE_64BIT:
1272 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1273 break;
1274 case IEMMODE_32BIT:
1275 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1276 break;
1277 case IEMMODE_16BIT:
1278 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1279 break;
1280 }
1281#endif
1282}
1283
1284
1285/**
1286 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1287 *
1288 * This is mostly a copy of iemInitDecoder.
1289 *
1290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1291 */
1292DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1293{
1294 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1295
1296#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1305#endif
1306
1307 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1308 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1309 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1310 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1311 pVCpu->iem.s.enmEffAddrMode = enmMode;
1312 if (enmMode != IEMMODE_64BIT)
1313 {
1314 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1315 pVCpu->iem.s.enmEffOpSize = enmMode;
1316 }
1317 else
1318 {
1319 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1320 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1321 }
1322 pVCpu->iem.s.fPrefixes = 0;
1323 pVCpu->iem.s.uRexReg = 0;
1324 pVCpu->iem.s.uRexB = 0;
1325 pVCpu->iem.s.uRexIndex = 0;
1326 pVCpu->iem.s.idxPrefix = 0;
1327 pVCpu->iem.s.uVex3rdReg = 0;
1328 pVCpu->iem.s.uVexLength = 0;
1329 pVCpu->iem.s.fEvexStuff = 0;
1330 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1331#ifdef IEM_WITH_CODE_TLB
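 /* Reuse the instruction buffer from the previous instruction if it still covers the new RIP,
    recomputing the offsets; otherwise clear it so the next opcode fetch goes through the code
    TLB again. */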
1332 if (pVCpu->iem.s.pbInstrBuf)
1333 {
1334 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1335 - pVCpu->iem.s.uInstrBufPc;
1336 if (off < pVCpu->iem.s.cbInstrBufTotal)
1337 {
1338 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1339 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1340 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1341 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1342 else
1343 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1344 }
1345 else
1346 {
1347 pVCpu->iem.s.pbInstrBuf = NULL;
1348 pVCpu->iem.s.offInstrNextByte = 0;
1349 pVCpu->iem.s.offCurInstrStart = 0;
1350 pVCpu->iem.s.cbInstrBuf = 0;
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 }
1353 }
1354 else
1355 {
1356 pVCpu->iem.s.offInstrNextByte = 0;
1357 pVCpu->iem.s.offCurInstrStart = 0;
1358 pVCpu->iem.s.cbInstrBuf = 0;
1359 pVCpu->iem.s.cbInstrBufTotal = 0;
1360 }
1361#else
1362 pVCpu->iem.s.cbOpcode = 0;
1363 pVCpu->iem.s.offOpcode = 0;
1364#endif
1365 pVCpu->iem.s.offModRm = 0;
1366 Assert(pVCpu->iem.s.cActiveMappings == 0);
1367 pVCpu->iem.s.iNextMapping = 0;
1368 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1369 Assert(pVCpu->iem.s.fBypassHandlers == false);
1370#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1371 if (!pVCpu->iem.s.fInPatchCode)
1372 { /* likely */ }
1373 else
1374 {
1375 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1376 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1377 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1378 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1379 if (!pVCpu->iem.s.fInPatchCode)
1380 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1381 }
1382#endif
1383
1384#ifdef DBGFTRACE_ENABLED
1385 switch (enmMode)
1386 {
1387 case IEMMODE_64BIT:
1388 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1389 break;
1390 case IEMMODE_32BIT:
1391 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1392 break;
1393 case IEMMODE_16BIT:
1394 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1395 break;
1396 }
1397#endif
1398}
1399
1400
1401
1402/**
1403 * Prefetches opcodes the first time, i.e. when starting execution.
1404 *
1405 * @returns Strict VBox status code.
1406 * @param pVCpu The cross context virtual CPU structure of the
1407 * calling thread.
1408 * @param fBypassHandlers Whether to bypass access handlers.
1409 */
1410IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1411{
1412 iemInitDecoder(pVCpu, fBypassHandlers);
1413
1414#ifdef IEM_WITH_CODE_TLB
1415 /** @todo Do ITLB lookup here. */
1416
1417#else /* !IEM_WITH_CODE_TLB */
1418
1419 /*
1420 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1421 *
1422 * First translate CS:rIP to a physical address.
1423 */
1424 uint32_t cbToTryRead;
1425 RTGCPTR GCPtrPC;
1426 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1427 {
1428 cbToTryRead = PAGE_SIZE;
1429 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1430 if (IEM_IS_CANONICAL(GCPtrPC))
1431 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1432 else
1433 return iemRaiseGeneralProtectionFault0(pVCpu);
1434 }
1435 else
1436 {
1437 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1438 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1439 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1440 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1441 else
1442 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1443 if (cbToTryRead) { /* likely */ }
1444 else /* overflowed */
1445 {
1446 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1447 cbToTryRead = UINT32_MAX;
1448 }
1449 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1450 Assert(GCPtrPC <= UINT32_MAX);
1451 }
1452
1453# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1454 /* Allow interpretation of patch manager code blocks since they can for
1455 instance throw #PFs for perfectly good reasons. */
1456 if (pVCpu->iem.s.fInPatchCode)
1457 {
1458 size_t cbRead = 0;
1459 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1460 AssertRCReturn(rc, rc);
1461 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1462 return VINF_SUCCESS;
1463 }
1464# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1465
1466 RTGCPHYS GCPhys;
1467 uint64_t fFlags;
1468 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1469 if (RT_SUCCESS(rc)) { /* probable */ }
1470 else
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1473 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1474 }
1475 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1476 else
1477 {
1478 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1479 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1480 }
1481 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1485 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1486 }
1487 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1488 /** @todo Check reserved bits and such stuff. PGM is better at doing
1489 * that, so do it when implementing the guest virtual address
1490 * TLB... */
1491
1492 /*
1493 * Read the bytes at this address.
1494 */
1495 PVM pVM = pVCpu->CTX_SUFF(pVM);
1496# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1497 size_t cbActual;
1498 if ( PATMIsEnabled(pVM)
1499 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1500 {
1501 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1502 Assert(cbActual > 0);
1503 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1504 }
1505 else
1506# endif
1507 {
1508 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1509 if (cbToTryRead > cbLeftOnPage)
1510 cbToTryRead = cbLeftOnPage;
1511 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1512 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1513
1514 if (!pVCpu->iem.s.fBypassHandlers)
1515 {
1516 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1517 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1518 { /* likely */ }
1519 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1520 {
1521 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1522                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1524 }
1525 else
1526 {
1527 Log((RT_SUCCESS(rcStrict)
1528 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1529 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1530                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1531 return rcStrict;
1532 }
1533 }
1534 else
1535 {
1536 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1537 if (RT_SUCCESS(rc))
1538 { /* likely */ }
1539 else
1540 {
1541 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1542                      GCPtrPC, GCPhys, cbToTryRead, rc));
1543 return rc;
1544 }
1545 }
1546 pVCpu->iem.s.cbOpcode = cbToTryRead;
1547 }
1548#endif /* !IEM_WITH_CODE_TLB */
1549 return VINF_SUCCESS;
1550}
1551
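#if 0 /* Illustrative sketch only, not built: a minimal caller pattern for the decoder
       * init + prefetch above. The helper name iemExecOneSketch is hypothetical and
       * not part of IEM. */
static VBOXSTRICTRC iemExecOneSketch(PVMCPU pVCpu)
{
    /* Reset the decoder state and pull in the first opcode bytes at CS:RIP;
       page faults, #GP(0) and the like are raised in there. */
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false /*fBypassHandlers*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Decoding would start here, typically by fetching the first opcode byte. */
    return rcStrict;
}
#endif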
1552
1553/**
1554 * Invalidates the IEM TLBs.
1555 *
1556 * This is called internally as well as by PGM when moving GC mappings.
1557 *
1558 *
1559 * @param pVCpu The cross context virtual CPU structure of the calling
1560 * thread.
1561 * @param fVmm Set when PGM calls us with a remapping.
1562 */
1563VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1564{
1565#ifdef IEM_WITH_CODE_TLB
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1568 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1569 { /* very likely */ }
1570 else
1571 {
1572 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1573 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1574 while (i-- > 0)
1575 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1576 }
1577#endif
1578
1579#ifdef IEM_WITH_DATA_TLB
1580 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1581 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1582 { /* very likely */ }
1583 else
1584 {
1585 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1586 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1587 while (i-- > 0)
1588 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1589 }
1590#endif
1591 NOREF(pVCpu); NOREF(fVmm);
1592}
1593
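#if 0 /* Illustrative sketch only, not built: why bumping uTlbRevision lazily invalidates
       * every entry. The SKETCH_* constants are simplified stand-ins, not the real
       * IEMTLB_REVISION_INCR value or IEMTLB layout. */
# include <stdint.h>
# define SKETCH_PAGE_SHIFT  12
# define SKETCH_REV_INCR    UINT64_C(0x0000010000000000) /* revision kept in the high bits of the tag */

static bool sketchTlbTagMatches(uint64_t uEntryTag, uint64_t GCPtr, uint64_t uTlbRevision)
{
    /* Tags mix the page number with the revision that was current when the entry
       was inserted.  After a global invalidation the revision differs, so every
       stale tag simply mismatches without touching the 256 array entries. */
    uint64_t const uTag = (GCPtr >> SKETCH_PAGE_SHIFT) | uTlbRevision;
    return uEntryTag == uTag;
}
#endif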
1594
1595/**
1596 * Invalidates a page in the TLBs.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 * @param   GCPtr       The address of the page to invalidate.
1601 */
1602VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1603{
1604#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1605 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1606 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1607 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1608 uintptr_t idx = (uint8_t)GCPtr;
1609
1610# ifdef IEM_WITH_CODE_TLB
1611 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1612 {
1613 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1614 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1615 pVCpu->iem.s.cbInstrBufTotal = 0;
1616 }
1617# endif
1618
1619# ifdef IEM_WITH_DATA_TLB
1620 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1621 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1622# endif
1623#else
1624 NOREF(pVCpu); NOREF(GCPtr);
1625#endif
1626}
1627
1628
1629/**
1630 * Invalidates the host physical aspects of the IEM TLBs.
1631 *
1632 * This is called internally as well as by PGM when moving GC mappings.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure of the calling
1635 * thread.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1638{
1639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1640    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1641
1642# ifdef IEM_WITH_CODE_TLB
1643 pVCpu->iem.s.cbInstrBufTotal = 0;
1644# endif
1645 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1646 if (uTlbPhysRev != 0)
1647 {
1648 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1649 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1650 }
1651 else
1652 {
1653 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1654 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1655
1656 unsigned i;
1657# ifdef IEM_WITH_CODE_TLB
1658 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1659 while (i-- > 0)
1660 {
1661 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1662 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1663 }
1664# endif
1665# ifdef IEM_WITH_DATA_TLB
1666 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1667 while (i-- > 0)
1668 {
1669 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1670 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1671 }
1672# endif
1673 }
1674#else
1675 NOREF(pVCpu);
1676#endif
1677}
1678
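#if 0 /* Illustrative sketch only, not built: the combined flags + physical-revision test
       * used by the code TLB fast path. The SKETCH_* masks are stand-ins for the real
       * IEMTLBE_F_* values and bit layout. */
# include <stdint.h>
# define SKETCH_F_PHYS_REV      UINT64_C(0xffffffffffff0000)  /* physical revision lives in the high bits */
# define SKETCH_F_NO_MAPPINGR3  UINT64_C(0x0000000000000001)
# define SKETCH_F_PG_NO_READ    UINT64_C(0x0000000000000002)

static bool sketchCanUseDirectMapping(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
{
    /* A single compare verifies both that the physical info is current (the revision
       bits match) and that no "don't map / don't read" flag is set, since any set
       flag makes the masked value differ from the bare revision. */
    return (fFlagsAndPhysRev & (SKETCH_F_PHYS_REV | SKETCH_F_NO_MAPPINGR3 | SKETCH_F_PG_NO_READ))
        == uTlbPhysRev;
}
#endif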
1679
1680/**
1681 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1682 *
1683 * This is called internally as well as by PGM when moving GC mappings.
1684 *
1685 * @param pVM The cross context VM structure.
1686 *
1687 * @remarks Caller holds the PGM lock.
1688 */
1689VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1690{
1691 RT_NOREF_PV(pVM);
1692}
1693
1694#ifdef IEM_WITH_CODE_TLB
1695
1696/**
1697 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1698 * longjmp'ing on failure.
1699 *
1700 * We end up here for a number of reasons:
1701 * - pbInstrBuf isn't yet initialized.
1702 *      - Advancing beyond the buffer boundary (e.g. cross page).
1703 * - Advancing beyond the CS segment limit.
1704 * - Fetching from non-mappable page (e.g. MMIO).
1705 *
1706 * @param pVCpu The cross context virtual CPU structure of the
1707 * calling thread.
1708 * @param pvDst Where to return the bytes.
1709 * @param cbDst Number of bytes to read.
1710 *
1711 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1712 */
1713IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1714{
1715#ifdef IN_RING3
1716 for (;;)
1717 {
1718 Assert(cbDst <= 8);
1719 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1720
1721 /*
1722 * We might have a partial buffer match, deal with that first to make the
1723 * rest simpler. This is the first part of the cross page/buffer case.
1724 */
1725 if (pVCpu->iem.s.pbInstrBuf != NULL)
1726 {
1727 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1728 {
1729 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1730 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1731 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1732
1733 cbDst -= cbCopy;
1734 pvDst = (uint8_t *)pvDst + cbCopy;
1735 offBuf += cbCopy;
1736                pVCpu->iem.s.offInstrNextByte = offBuf;
1737 }
1738 }
1739
1740 /*
1741 * Check segment limit, figuring how much we're allowed to access at this point.
1742 *
1743 * We will fault immediately if RIP is past the segment limit / in non-canonical
1744 * territory. If we do continue, there are one or more bytes to read before we
1745 * end up in trouble and we need to do that first before faulting.
1746 */
1747 RTGCPTR GCPtrFirst;
1748 uint32_t cbMaxRead;
1749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1750 {
1751 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1752 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1753 { /* likely */ }
1754 else
1755 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1756 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1757 }
1758 else
1759 {
1760 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1761 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1762 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1763 { /* likely */ }
1764 else
1765 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1766 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1767 if (cbMaxRead != 0)
1768 { /* likely */ }
1769 else
1770 {
1771 /* Overflowed because address is 0 and limit is max. */
1772 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1773 cbMaxRead = X86_PAGE_SIZE;
1774 }
1775 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1776 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1777 if (cbMaxRead2 < cbMaxRead)
1778 cbMaxRead = cbMaxRead2;
1779 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1780 }
1781
1782 /*
1783 * Get the TLB entry for this piece of code.
1784 */
1785 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1786 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1787 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1788 if (pTlbe->uTag == uTag)
1789 {
1790 /* likely when executing lots of code, otherwise unlikely */
1791# ifdef VBOX_WITH_STATISTICS
1792 pVCpu->iem.s.CodeTlb.cTlbHits++;
1793# endif
1794 }
1795 else
1796 {
1797 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1798# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1799 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1800 {
1801 pTlbe->uTag = uTag;
1802                pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1803                                        | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1804 pTlbe->GCPhys = NIL_RTGCPHYS;
1805 pTlbe->pbMappingR3 = NULL;
1806 }
1807 else
1808# endif
1809 {
1810 RTGCPHYS GCPhys;
1811 uint64_t fFlags;
1812 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1813 if (RT_FAILURE(rc))
1814 {
1815                    Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1816 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1817 }
1818
1819 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1820 pTlbe->uTag = uTag;
1821 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1822 pTlbe->GCPhys = GCPhys;
1823 pTlbe->pbMappingR3 = NULL;
1824 }
1825 }
1826
1827 /*
1828 * Check TLB page table level access flags.
1829 */
1830 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1831 {
1832 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1833 {
1834 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1835 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1836 }
1837 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1838 {
1839                Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1840 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1841 }
1842 }
1843
1844# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1845 /*
1846 * Allow interpretation of patch manager code blocks since they can for
1847 * instance throw #PFs for perfectly good reasons.
1848 */
1849 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1850        { /* not unlikely */ }
1851 else
1852 {
1853            /** @todo This could be optimized a little in ring-3 if we liked. */
1854 size_t cbRead = 0;
1855 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1856 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1857 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1858 return;
1859 }
1860# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1861
1862 /*
1863 * Look up the physical page info if necessary.
1864 */
1865 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1866 { /* not necessary */ }
1867 else
1868 {
1869 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1870 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1871 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1872 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1873 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1874 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1875 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1876 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1877 }
1878
1879# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1880 /*
1881 * Try do a direct read using the pbMappingR3 pointer.
1882 */
1883 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1884 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1885 {
1886 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1887 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1888 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1889 {
1890 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1891 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1892 }
1893 else
1894 {
1895 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1896 Assert(cbInstr < cbMaxRead);
1897 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1898 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1899 }
1900 if (cbDst <= cbMaxRead)
1901 {
1902 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1903 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1904 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1905 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1906 return;
1907 }
1908 pVCpu->iem.s.pbInstrBuf = NULL;
1909
1910 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1911 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1912 }
1913 else
1914# endif
1915#if 0
1916 /*
1917         * If there is no special read handling, we can read a bit more and
1918 * put it in the prefetch buffer.
1919 */
1920 if ( cbDst < cbMaxRead
1921 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1922 {
1923 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1924 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1925 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1926 { /* likely */ }
1927 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1928 {
1929 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1930 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1931 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1932                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1933 }
1934 else
1935 {
1936 Log((RT_SUCCESS(rcStrict)
1937 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1938 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1939 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1940 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1941 }
1942 }
1943 /*
1944 * Special read handling, so only read exactly what's needed.
1945 * This is a highly unlikely scenario.
1946 */
1947 else
1948#endif
1949 {
1950 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1951 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1952 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1953 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1954 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1955 { /* likely */ }
1956 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1957 {
1958                Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1959                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1960 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1961 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1962 }
1963 else
1964 {
1965 Log((RT_SUCCESS(rcStrict)
1966                     ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1967                     : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1968                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1969 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1970 }
1971 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1972 if (cbToRead == cbDst)
1973 return;
1974 }
1975
1976 /*
1977 * More to read, loop.
1978 */
1979 cbDst -= cbMaxRead;
1980 pvDst = (uint8_t *)pvDst + cbMaxRead;
1981 }
1982#else
1983 RT_NOREF(pvDst, cbDst);
1984 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1985#endif
1986}
1987
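#if 0 /* Illustrative sketch only, not built: the page-splitting idea behind the fetch
       * loop above, reduced to plain C. The callback and the 4 KiB constant are
       * assumptions of the sketch, not IEM interfaces. */
# include <stddef.h>
# include <stdint.h>

static void sketchFetchAcrossPages(uint8_t *pbDst, size_t cbDst, uint64_t GCPtr,
                                   void (*pfnReadPage)(uint8_t *pbDst, uint64_t GCPtr, size_t cb))
{
    while (cbDst > 0)
    {
        /* Never cross a 4 KiB page boundary in a single read; split and loop instead. */
        size_t cbChunk = 0x1000 - (size_t)(GCPtr & 0xfff);
        if (cbChunk > cbDst)
            cbChunk = cbDst;
        pfnReadPage(pbDst, GCPtr, cbChunk);

        pbDst += cbChunk;
        GCPtr += cbChunk;
        cbDst -= cbChunk;
    }
}
#endif
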
1988#else
1989
1990/**
1991 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1992 * exception if it fails.
1993 *
1994 * @returns Strict VBox status code.
1995 * @param pVCpu The cross context virtual CPU structure of the
1996 * calling thread.
1997 * @param   cbMin               The minimum number of bytes relative to offOpcode
1998 *                              that must be read.
1999 */
2000IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2001{
2002 /*
2003 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2004 *
2005 * First translate CS:rIP to a physical address.
2006 */
2007 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2008 uint32_t cbToTryRead;
2009 RTGCPTR GCPtrNext;
2010 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2011 {
2012 cbToTryRead = PAGE_SIZE;
2013 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2014 if (!IEM_IS_CANONICAL(GCPtrNext))
2015 return iemRaiseGeneralProtectionFault0(pVCpu);
2016 }
2017 else
2018 {
2019 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2020 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2021 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2022 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2023 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2024 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2025 if (!cbToTryRead) /* overflowed */
2026 {
2027 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2028 cbToTryRead = UINT32_MAX;
2029 /** @todo check out wrapping around the code segment. */
2030 }
2031 if (cbToTryRead < cbMin - cbLeft)
2032 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2033 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2034 }
2035
2036 /* Only read up to the end of the page, and make sure we don't read more
2037 than the opcode buffer can hold. */
2038 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2039 if (cbToTryRead > cbLeftOnPage)
2040 cbToTryRead = cbLeftOnPage;
2041 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2042 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2043/** @todo r=bird: Convert assertion into undefined opcode exception? */
2044 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2045
2046# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2047 /* Allow interpretation of patch manager code blocks since they can for
2048 instance throw #PFs for perfectly good reasons. */
2049 if (pVCpu->iem.s.fInPatchCode)
2050 {
2051 size_t cbRead = 0;
2052 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2053 AssertRCReturn(rc, rc);
2054 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2055 return VINF_SUCCESS;
2056 }
2057# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2058
2059 RTGCPHYS GCPhys;
2060 uint64_t fFlags;
2061 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2062 if (RT_FAILURE(rc))
2063 {
2064 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2065 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2066 }
2067 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2068 {
2069 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2070 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2071 }
2072 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2073 {
2074 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2075 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2076 }
2077 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2078 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2079 /** @todo Check reserved bits and such stuff. PGM is better at doing
2080 * that, so do it when implementing the guest virtual address
2081 * TLB... */
2082
2083 /*
2084 * Read the bytes at this address.
2085 *
2086 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2087 * and since PATM should only patch the start of an instruction there
2088 * should be no need to check again here.
2089 */
2090 if (!pVCpu->iem.s.fBypassHandlers)
2091 {
2092 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2093 cbToTryRead, PGMACCESSORIGIN_IEM);
2094 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2095 { /* likely */ }
2096 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2097 {
2098 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2099                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2101 }
2102 else
2103 {
2104 Log((RT_SUCCESS(rcStrict)
2105 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2106 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2107                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2108 return rcStrict;
2109 }
2110 }
2111 else
2112 {
2113 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2114 if (RT_SUCCESS(rc))
2115 { /* likely */ }
2116 else
2117 {
2118 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2119 return rc;
2120 }
2121 }
2122 pVCpu->iem.s.cbOpcode += cbToTryRead;
2123 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2124
2125 return VINF_SUCCESS;
2126}
2127
2128#endif /* !IEM_WITH_CODE_TLB */
2129#ifndef IEM_WITH_SETJMP
2130
2131/**
2132 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2133 *
2134 * @returns Strict VBox status code.
2135 * @param pVCpu The cross context virtual CPU structure of the
2136 * calling thread.
2137 * @param pb Where to return the opcode byte.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2140{
2141 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2142 if (rcStrict == VINF_SUCCESS)
2143 {
2144 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2145 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2146 pVCpu->iem.s.offOpcode = offOpcode + 1;
2147 }
2148 else
2149 *pb = 0;
2150 return rcStrict;
2151}
2152
2153
2154/**
2155 * Fetches the next opcode byte.
2156 *
2157 * @returns Strict VBox status code.
2158 * @param pVCpu The cross context virtual CPU structure of the
2159 * calling thread.
2160 * @param pu8 Where to return the opcode byte.
2161 */
2162DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2163{
2164 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2165 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2166 {
2167 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2168 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2169 return VINF_SUCCESS;
2170 }
2171 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2172}
2173
2174#else /* IEM_WITH_SETJMP */
2175
2176/**
2177 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2178 *
2179 * @returns The opcode byte.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 */
2182DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2183{
2184# ifdef IEM_WITH_CODE_TLB
2185 uint8_t u8;
2186 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2187 return u8;
2188# else
2189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2190 if (rcStrict == VINF_SUCCESS)
2191 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2192 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2193# endif
2194}
2195
2196
2197/**
2198 * Fetches the next opcode byte, longjmp on error.
2199 *
2200 * @returns The opcode byte.
2201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2202 */
2203DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2204{
2205# ifdef IEM_WITH_CODE_TLB
2206 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2207 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2208 if (RT_LIKELY( pbBuf != NULL
2209 && offBuf < pVCpu->iem.s.cbInstrBuf))
2210 {
2211 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2212 return pbBuf[offBuf];
2213 }
2214# else
2215 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2216 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2217 {
2218 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2219 return pVCpu->iem.s.abOpcode[offOpcode];
2220 }
2221# endif
2222 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2223}
2224
2225#endif /* IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next opcode byte, returns automatically on failure.
2229 *
2230 * @param a_pu8 Where to return the opcode byte.
2231 * @remark Implicitly references pVCpu.
2232 */
2233#ifndef IEM_WITH_SETJMP
2234# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2235 do \
2236 { \
2237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2238 if (rcStrict2 == VINF_SUCCESS) \
2239 { /* likely */ } \
2240 else \
2241 return rcStrict2; \
2242 } while (0)
2243#else
2244# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2245#endif /* IEM_WITH_SETJMP */
2246
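#if 0 /* Illustrative sketch only, not built: how decoder code typically consumes opcode
       * bytes through IEM_OPCODE_GET_NEXT_U8. The function name and shape are
       * hypothetical, not an actual IEM opcode handler. */
static VBOXSTRICTRC iemDecodeSketch(PVMCPU pVCpu)
{
    uint8_t bOpcode;
    IEM_OPCODE_GET_NEXT_U8(&bOpcode);   /* returns (or longjmps) on fetch failure */

    /* ... dispatch on bOpcode here ... */
    RT_NOREF(bOpcode);
    return VINF_SUCCESS;
}
#endif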
2247
2248#ifndef IEM_WITH_SETJMP
2249/**
2250 * Fetches the next signed byte from the opcode stream.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pi8 Where to return the signed byte.
2255 */
2256DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2257{
2258 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2259}
2260#endif /* !IEM_WITH_SETJMP */
2261
2262
2263/**
2264 * Fetches the next signed byte from the opcode stream, returning automatically
2265 * on failure.
2266 *
2267 * @param a_pi8 Where to return the signed byte.
2268 * @remark Implicitly references pVCpu.
2269 */
2270#ifndef IEM_WITH_SETJMP
2271# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2272 do \
2273 { \
2274 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2275 if (rcStrict2 != VINF_SUCCESS) \
2276 return rcStrict2; \
2277 } while (0)
2278#else /* IEM_WITH_SETJMP */
2279# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2280
2281#endif /* IEM_WITH_SETJMP */
2282
2283#ifndef IEM_WITH_SETJMP
2284
2285/**
2286 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2287 *
2288 * @returns Strict VBox status code.
2289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2290 * @param   pu16                Where to return the opcode word.
2291 */
2292DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2293{
2294 uint8_t u8;
2295 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2296 if (rcStrict == VINF_SUCCESS)
2297 *pu16 = (int8_t)u8;
2298 return rcStrict;
2299}
2300
2301
2302/**
2303 * Fetches the next signed byte from the opcode stream, extending it to
2304 * unsigned 16-bit.
2305 *
2306 * @returns Strict VBox status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param pu16 Where to return the unsigned word.
2309 */
2310DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2311{
2312 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2313 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2314 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2315
2316 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2317 pVCpu->iem.s.offOpcode = offOpcode + 1;
2318 return VINF_SUCCESS;
2319}
2320
2321#endif /* !IEM_WITH_SETJMP */
2322
2323/**
2324 * Fetches the next signed byte from the opcode stream and sign-extends it to
2325 * a word, returning automatically on failure.
2326 *
2327 * @param a_pu16 Where to return the word.
2328 * @remark Implicitly references pVCpu.
2329 */
2330#ifndef IEM_WITH_SETJMP
2331# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2332 do \
2333 { \
2334 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2335 if (rcStrict2 != VINF_SUCCESS) \
2336 return rcStrict2; \
2337 } while (0)
2338#else
2339# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2340#endif
2341
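#if 0 /* Illustrative sketch only, not built: what the (int8_t) cast in the S8 -> U16
       * fetchers above does, shown with plain integers. */
# include <stdint.h>
# include <assert.h>

static void sketchSignExtendS8ToU16(void)
{
    uint8_t  const u8  = 0x80;                  /* -128 when viewed as a signed byte      */
    uint16_t const u16 = (uint16_t)(int8_t)u8;  /* same conversion as *pu16 = (int8_t)u8  */
    assert(u16 == 0xff80);                      /* the sign bit fills bits 8 thru 15      */
}
#endif
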
2342#ifndef IEM_WITH_SETJMP
2343
2344/**
2345 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2346 *
2347 * @returns Strict VBox status code.
2348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2349 * @param pu32 Where to return the opcode dword.
2350 */
2351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2352{
2353 uint8_t u8;
2354 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2355 if (rcStrict == VINF_SUCCESS)
2356 *pu32 = (int8_t)u8;
2357 return rcStrict;
2358}
2359
2360
2361/**
2362 * Fetches the next signed byte from the opcode stream, extending it to
2363 * unsigned 32-bit.
2364 *
2365 * @returns Strict VBox status code.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 * @param pu32 Where to return the unsigned dword.
2368 */
2369DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2370{
2371 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2372 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2373 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2374
2375 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2376 pVCpu->iem.s.offOpcode = offOpcode + 1;
2377 return VINF_SUCCESS;
2378}
2379
2380#endif /* !IEM_WITH_SETJMP */
2381
2382/**
2383 * Fetches the next signed byte from the opcode stream and sign-extends it to
2384 * a double word, returning automatically on failure.
2385 *
2386 * @param   a_pu32              Where to return the double word.
2387 * @remark Implicitly references pVCpu.
2388 */
2389#ifndef IEM_WITH_SETJMP
2390#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2391 do \
2392 { \
2393 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2394 if (rcStrict2 != VINF_SUCCESS) \
2395 return rcStrict2; \
2396 } while (0)
2397#else
2398# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2399#endif
2400
2401#ifndef IEM_WITH_SETJMP
2402
2403/**
2404 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2405 *
2406 * @returns Strict VBox status code.
2407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2408 * @param pu64 Where to return the opcode qword.
2409 */
2410DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2411{
2412 uint8_t u8;
2413 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2414 if (rcStrict == VINF_SUCCESS)
2415 *pu64 = (int8_t)u8;
2416 return rcStrict;
2417}
2418
2419
2420/**
2421 * Fetches the next signed byte from the opcode stream, extending it to
2422 * unsigned 64-bit.
2423 *
2424 * @returns Strict VBox status code.
2425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2426 * @param pu64 Where to return the unsigned qword.
2427 */
2428DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2429{
2430 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2431 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2432 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2433
2434 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2435 pVCpu->iem.s.offOpcode = offOpcode + 1;
2436 return VINF_SUCCESS;
2437}
2438
2439#endif /* !IEM_WITH_SETJMP */
2440
2441
2442/**
2443 * Fetches the next signed byte from the opcode stream and sign-extends it to
2444 * a quad word, returning automatically on failure.
2445 *
2446 * @param   a_pu64              Where to return the quad word.
2447 * @remark Implicitly references pVCpu.
2448 */
2449#ifndef IEM_WITH_SETJMP
2450# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2451 do \
2452 { \
2453 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2454 if (rcStrict2 != VINF_SUCCESS) \
2455 return rcStrict2; \
2456 } while (0)
2457#else
2458# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2459#endif
2460
2461
2462#ifndef IEM_WITH_SETJMP
2463/**
2464 * Fetches the next opcode byte, which is a ModR/M byte, noting down its position.
2465 *
2466 * @returns Strict VBox status code.
2467 * @param pVCpu The cross context virtual CPU structure of the
2468 * calling thread.
2469 * @param pu8 Where to return the opcode byte.
2470 */
2471DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2472{
2473 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2474 pVCpu->iem.s.offModRm = offOpcode;
2475 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2476 {
2477 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2478 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2479 return VINF_SUCCESS;
2480 }
2481 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2482}
2483#else /* IEM_WITH_SETJMP */
2484/**
2485 * Fetches the next opcode byte, which is a ModR/M byte, noting down its position. Longjmps on error.
2486 *
2487 * @returns The opcode byte.
2488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2489 */
2490DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2491{
2492# ifdef IEM_WITH_CODE_TLB
2493 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2494 pVCpu->iem.s.offModRm = offBuf;
2495 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2496 if (RT_LIKELY( pbBuf != NULL
2497 && offBuf < pVCpu->iem.s.cbInstrBuf))
2498 {
2499 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2500 return pbBuf[offBuf];
2501 }
2502# else
2503 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2504 pVCpu->iem.s.offModRm = offOpcode;
2505 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2506 {
2507 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2508 return pVCpu->iem.s.abOpcode[offOpcode];
2509 }
2510# endif
2511 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2512}
2513#endif /* IEM_WITH_SETJMP */
2514
2515/**
2516 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2517 * on failure.
2518 *
2519 * Will note down the position of the ModR/M byte for VT-x exits.
2520 *
2521 * @param a_pbRm Where to return the RM opcode byte.
2522 * @remark Implicitly references pVCpu.
2523 */
2524#ifndef IEM_WITH_SETJMP
2525# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2526 do \
2527 { \
2528 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2529 if (rcStrict2 == VINF_SUCCESS) \
2530 { /* likely */ } \
2531 else \
2532 return rcStrict2; \
2533 } while (0)
2534#else
2535# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2536#endif /* IEM_WITH_SETJMP */
2537
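#if 0 /* Illustrative sketch only, not built: the standard x86 mod/reg/rm split of the
       * byte fetched by IEM_OPCODE_GET_NEXT_RM. The helper is not an IEM API. */
# include <stdint.h>

static void sketchSplitModRm(uint8_t bRm, uint8_t *pMod, uint8_t *pReg, uint8_t *pRm)
{
    *pMod = bRm >> 6;        /* 0..2 = memory forms, 3 = register operand     */
    *pReg = (bRm >> 3) & 7;  /* register operand or opcode extension (/digit) */
    *pRm  = bRm & 7;         /* register, or base of the memory operand       */
}
#endif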
2538
2539#ifndef IEM_WITH_SETJMP
2540
2541/**
2542 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2543 *
2544 * @returns Strict VBox status code.
2545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2546 * @param pu16 Where to return the opcode word.
2547 */
2548DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2549{
2550 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2551 if (rcStrict == VINF_SUCCESS)
2552 {
2553 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2554# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2555 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2556# else
2557 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2558# endif
2559 pVCpu->iem.s.offOpcode = offOpcode + 2;
2560 }
2561 else
2562 *pu16 = 0;
2563 return rcStrict;
2564}
2565
2566
2567/**
2568 * Fetches the next opcode word.
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2572 * @param pu16 Where to return the opcode word.
2573 */
2574DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2575{
2576 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2577 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2578 {
2579 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2581 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2582# else
2583 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584# endif
2585 return VINF_SUCCESS;
2586 }
2587 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2588}
2589
2590#else /* IEM_WITH_SETJMP */
2591
2592/**
2593 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2594 *
2595 * @returns The opcode word.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 */
2598DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2599{
2600# ifdef IEM_WITH_CODE_TLB
2601 uint16_t u16;
2602 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2603 return u16;
2604# else
2605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2606 if (rcStrict == VINF_SUCCESS)
2607 {
2608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2609 pVCpu->iem.s.offOpcode += 2;
2610# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2611 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2612# else
2613 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2614# endif
2615 }
2616 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2617# endif
2618}
2619
2620
2621/**
2622 * Fetches the next opcode word, longjmp on error.
2623 *
2624 * @returns The opcode word.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 */
2627DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2628{
2629# ifdef IEM_WITH_CODE_TLB
2630 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2631 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2632 if (RT_LIKELY( pbBuf != NULL
2633 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2634 {
2635 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2636# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2637 return *(uint16_t const *)&pbBuf[offBuf];
2638# else
2639 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2640# endif
2641 }
2642# else
2643 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2644 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2645 {
2646 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2647# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2648 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2649# else
2650 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2651# endif
2652 }
2653# endif
2654 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2655}
2656
2657#endif /* IEM_WITH_SETJMP */
2658
2659
2660/**
2661 * Fetches the next opcode word, returns automatically on failure.
2662 *
2663 * @param a_pu16 Where to return the opcode word.
2664 * @remark Implicitly references pVCpu.
2665 */
2666#ifndef IEM_WITH_SETJMP
2667# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2668 do \
2669 { \
2670 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2671 if (rcStrict2 != VINF_SUCCESS) \
2672 return rcStrict2; \
2673 } while (0)
2674#else
2675# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2676#endif
2677
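#if 0 /* Illustrative sketch only, not built: the byte order the word fetchers above rely
       * on - the first opcode byte is the low byte (x86 is little endian). */
# include <stdint.h>
# include <assert.h>

static void sketchAssembleOpcodeWord(void)
{
    uint8_t const abOpcode[2] = { 0x34, 0x12 };
    uint16_t const u16 = (uint16_t)(abOpcode[0] | ((uint16_t)abOpcode[1] << 8));
    assert(u16 == 0x1234);   /* matches what RT_MAKE_U16(abOpcode[0], abOpcode[1]) yields */
}
#endif
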
2678#ifndef IEM_WITH_SETJMP
2679
2680/**
2681 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param pu32 Where to return the opcode double word.
2686 */
2687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2688{
2689 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2690 if (rcStrict == VINF_SUCCESS)
2691 {
2692 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2693 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2694 pVCpu->iem.s.offOpcode = offOpcode + 2;
2695 }
2696 else
2697 *pu32 = 0;
2698 return rcStrict;
2699}
2700
2701
2702/**
2703 * Fetches the next opcode word, zero extending it to a double word.
2704 *
2705 * @returns Strict VBox status code.
2706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2707 * @param pu32 Where to return the opcode double word.
2708 */
2709DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2710{
2711 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2712 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2713 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2714
2715 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2716 pVCpu->iem.s.offOpcode = offOpcode + 2;
2717 return VINF_SUCCESS;
2718}
2719
2720#endif /* !IEM_WITH_SETJMP */
2721
2722
2723/**
2724 * Fetches the next opcode word and zero extends it to a double word, returns
2725 * automatically on failure.
2726 *
2727 * @param a_pu32 Where to return the opcode double word.
2728 * @remark Implicitly references pVCpu.
2729 */
2730#ifndef IEM_WITH_SETJMP
2731# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2732 do \
2733 { \
2734 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2735 if (rcStrict2 != VINF_SUCCESS) \
2736 return rcStrict2; \
2737 } while (0)
2738#else
2739# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2740#endif
2741
2742#ifndef IEM_WITH_SETJMP
2743
2744/**
2745 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu64 Where to return the opcode quad word.
2750 */
2751DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2752{
2753 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2754 if (rcStrict == VINF_SUCCESS)
2755 {
2756 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2757 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2758 pVCpu->iem.s.offOpcode = offOpcode + 2;
2759 }
2760 else
2761 *pu64 = 0;
2762 return rcStrict;
2763}
2764
2765
2766/**
2767 * Fetches the next opcode word, zero extending it to a quad word.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param pu64 Where to return the opcode quad word.
2772 */
2773DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2776 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2777 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2778
2779 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2780 pVCpu->iem.s.offOpcode = offOpcode + 2;
2781 return VINF_SUCCESS;
2782}
2783
2784#endif /* !IEM_WITH_SETJMP */
2785
2786/**
2787 * Fetches the next opcode word and zero extends it to a quad word, returns
2788 * automatically on failure.
2789 *
2790 * @param a_pu64 Where to return the opcode quad word.
2791 * @remark Implicitly references pVCpu.
2792 */
2793#ifndef IEM_WITH_SETJMP
2794# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2795 do \
2796 { \
2797 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2798 if (rcStrict2 != VINF_SUCCESS) \
2799 return rcStrict2; \
2800 } while (0)
2801#else
2802# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2803#endif
2804
2805
2806#ifndef IEM_WITH_SETJMP
2807/**
2808 * Fetches the next signed word from the opcode stream.
2809 *
2810 * @returns Strict VBox status code.
2811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2812 * @param pi16 Where to return the signed word.
2813 */
2814DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2815{
2816 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2817}
2818#endif /* !IEM_WITH_SETJMP */
2819
2820
2821/**
2822 * Fetches the next signed word from the opcode stream, returning automatically
2823 * on failure.
2824 *
2825 * @param a_pi16 Where to return the signed word.
2826 * @remark Implicitly references pVCpu.
2827 */
2828#ifndef IEM_WITH_SETJMP
2829# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2830 do \
2831 { \
2832 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2833 if (rcStrict2 != VINF_SUCCESS) \
2834 return rcStrict2; \
2835 } while (0)
2836#else
2837# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2838#endif
2839
2840#ifndef IEM_WITH_SETJMP
2841
2842/**
2843 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2844 *
2845 * @returns Strict VBox status code.
2846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2847 * @param pu32 Where to return the opcode dword.
2848 */
2849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2850{
2851 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2852 if (rcStrict == VINF_SUCCESS)
2853 {
2854 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2855# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2856 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2857# else
2858 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2859 pVCpu->iem.s.abOpcode[offOpcode + 1],
2860 pVCpu->iem.s.abOpcode[offOpcode + 2],
2861 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2862# endif
2863 pVCpu->iem.s.offOpcode = offOpcode + 4;
2864 }
2865 else
2866 *pu32 = 0;
2867 return rcStrict;
2868}
2869
2870
2871/**
2872 * Fetches the next opcode dword.
2873 *
2874 * @returns Strict VBox status code.
2875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2876 * @param pu32 Where to return the opcode double word.
2877 */
2878DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2879{
2880 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2881 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2882 {
2883 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2884# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2885 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2886# else
2887 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2888 pVCpu->iem.s.abOpcode[offOpcode + 1],
2889 pVCpu->iem.s.abOpcode[offOpcode + 2],
2890 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2891# endif
2892 return VINF_SUCCESS;
2893 }
2894 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2895}
2896
2897#else  /* IEM_WITH_SETJMP */
2898
2899/**
2900 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2901 *
2902 * @returns The opcode dword.
2903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2904 */
2905DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2906{
2907# ifdef IEM_WITH_CODE_TLB
2908 uint32_t u32;
2909 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2910 return u32;
2911# else
2912 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2913 if (rcStrict == VINF_SUCCESS)
2914 {
2915 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2916 pVCpu->iem.s.offOpcode = offOpcode + 4;
2917# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2918 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2919# else
2920 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2921 pVCpu->iem.s.abOpcode[offOpcode + 1],
2922 pVCpu->iem.s.abOpcode[offOpcode + 2],
2923 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2924# endif
2925 }
2926 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2927# endif
2928}
2929
2930
2931/**
2932 * Fetches the next opcode dword, longjmp on error.
2933 *
2934 * @returns The opcode dword.
2935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2936 */
2937DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2938{
2939# ifdef IEM_WITH_CODE_TLB
2940 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2941 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2942 if (RT_LIKELY( pbBuf != NULL
2943 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2944 {
2945 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2946# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2947 return *(uint32_t const *)&pbBuf[offBuf];
2948# else
2949 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2950 pbBuf[offBuf + 1],
2951 pbBuf[offBuf + 2],
2952 pbBuf[offBuf + 3]);
2953# endif
2954 }
2955# else
2956 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2957 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2958 {
2959 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2960# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2961 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2962# else
2963 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2964 pVCpu->iem.s.abOpcode[offOpcode + 1],
2965 pVCpu->iem.s.abOpcode[offOpcode + 2],
2966 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2967# endif
2968 }
2969# endif
2970 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2971}
2972
2973#endif /* IEM_WITH_SETJMP */
2974
2975
2976/**
2977 * Fetches the next opcode dword, returns automatically on failure.
2978 *
2979 * @param a_pu32 Where to return the opcode dword.
2980 * @remark Implicitly references pVCpu.
2981 */
2982#ifndef IEM_WITH_SETJMP
2983# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2984 do \
2985 { \
2986 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2987 if (rcStrict2 != VINF_SUCCESS) \
2988 return rcStrict2; \
2989 } while (0)
2990#else
2991# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2992#endif
2993
2994#ifndef IEM_WITH_SETJMP
2995
2996/**
2997 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2998 *
2999 * @returns Strict VBox status code.
3000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3001 * @param   pu64                Where to return the opcode qword.
3002 */
3003DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3004{
3005 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3006 if (rcStrict == VINF_SUCCESS)
3007 {
3008 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3009 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3010 pVCpu->iem.s.abOpcode[offOpcode + 1],
3011 pVCpu->iem.s.abOpcode[offOpcode + 2],
3012 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3013 pVCpu->iem.s.offOpcode = offOpcode + 4;
3014 }
3015 else
3016 *pu64 = 0;
3017 return rcStrict;
3018}
3019
3020
3021/**
3022 * Fetches the next opcode dword, zero extending it to a quad word.
3023 *
3024 * @returns Strict VBox status code.
3025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3026 * @param pu64 Where to return the opcode quad word.
3027 */
3028DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3029{
3030 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3031 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3032 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3033
3034 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3035 pVCpu->iem.s.abOpcode[offOpcode + 1],
3036 pVCpu->iem.s.abOpcode[offOpcode + 2],
3037 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3038 pVCpu->iem.s.offOpcode = offOpcode + 4;
3039 return VINF_SUCCESS;
3040}
3041
3042#endif /* !IEM_WITH_SETJMP */
3043
3044
3045/**
3046 * Fetches the next opcode dword and zero extends it to a quad word, returns
3047 * automatically on failure.
3048 *
3049 * @param a_pu64 Where to return the opcode quad word.
3050 * @remark Implicitly references pVCpu.
3051 */
3052#ifndef IEM_WITH_SETJMP
3053# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3054 do \
3055 { \
3056 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3057 if (rcStrict2 != VINF_SUCCESS) \
3058 return rcStrict2; \
3059 } while (0)
3060#else
3061# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3062#endif
3063
3064
3065#ifndef IEM_WITH_SETJMP
3066/**
3067 * Fetches the next signed double word from the opcode stream.
3068 *
3069 * @returns Strict VBox status code.
3070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3071 * @param pi32 Where to return the signed double word.
3072 */
3073DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3074{
3075 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3076}
3077#endif
3078
3079/**
3080 * Fetches the next signed double word from the opcode stream, returning
3081 * automatically on failure.
3082 *
3083 * @param a_pi32 Where to return the signed double word.
3084 * @remark Implicitly references pVCpu.
3085 */
3086#ifndef IEM_WITH_SETJMP
3087# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3088 do \
3089 { \
3090 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3091 if (rcStrict2 != VINF_SUCCESS) \
3092 return rcStrict2; \
3093 } while (0)
3094#else
3095# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3096#endif
3097
3098#ifndef IEM_WITH_SETJMP
3099
3100/**
3101 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3102 *
3103 * @returns Strict VBox status code.
3104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3105 * @param pu64 Where to return the opcode qword.
3106 */
3107DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3108{
3109 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3110 if (rcStrict == VINF_SUCCESS)
3111 {
3112 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3113 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3114 pVCpu->iem.s.abOpcode[offOpcode + 1],
3115 pVCpu->iem.s.abOpcode[offOpcode + 2],
3116 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3117 pVCpu->iem.s.offOpcode = offOpcode + 4;
3118 }
3119 else
3120 *pu64 = 0;
3121 return rcStrict;
3122}
3123
3124
3125/**
3126 * Fetches the next opcode dword, sign extending it into a quad word.
3127 *
3128 * @returns Strict VBox status code.
3129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3130 * @param pu64 Where to return the opcode quad word.
3131 */
3132DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3133{
3134 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3135 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3136 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3137
3138 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3139 pVCpu->iem.s.abOpcode[offOpcode + 1],
3140 pVCpu->iem.s.abOpcode[offOpcode + 2],
3141 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3142 *pu64 = i32;
3143 pVCpu->iem.s.offOpcode = offOpcode + 4;
3144 return VINF_SUCCESS;
3145}
3146
3147#endif /* !IEM_WITH_SETJMP */
3148
3149
3150/**
3151 * Fetches the next opcode double word and sign extends it to a quad word,
3152 * returns automatically on failure.
3153 *
3154 * @param a_pu64 Where to return the opcode quad word.
3155 * @remark Implicitly references pVCpu.
3156 */
3157#ifndef IEM_WITH_SETJMP
3158# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3159 do \
3160 { \
3161 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3162 if (rcStrict2 != VINF_SUCCESS) \
3163 return rcStrict2; \
3164 } while (0)
3165#else
3166# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3167#endif
3168
3169#ifndef IEM_WITH_SETJMP
3170
3171/**
3172 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3173 *
3174 * @returns Strict VBox status code.
3175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3176 * @param pu64 Where to return the opcode qword.
3177 */
3178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3179{
3180 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3181 if (rcStrict == VINF_SUCCESS)
3182 {
3183 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3184# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3185 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3186# else
3187 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3188 pVCpu->iem.s.abOpcode[offOpcode + 1],
3189 pVCpu->iem.s.abOpcode[offOpcode + 2],
3190 pVCpu->iem.s.abOpcode[offOpcode + 3],
3191 pVCpu->iem.s.abOpcode[offOpcode + 4],
3192 pVCpu->iem.s.abOpcode[offOpcode + 5],
3193 pVCpu->iem.s.abOpcode[offOpcode + 6],
3194 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3195# endif
3196 pVCpu->iem.s.offOpcode = offOpcode + 8;
3197 }
3198 else
3199 *pu64 = 0;
3200 return rcStrict;
3201}
3202
3203
3204/**
3205 * Fetches the next opcode qword.
3206 *
3207 * @returns Strict VBox status code.
3208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3209 * @param pu64 Where to return the opcode qword.
3210 */
3211DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3212{
3213 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3214 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3215 {
3216# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3217 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3218# else
3219 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3220 pVCpu->iem.s.abOpcode[offOpcode + 1],
3221 pVCpu->iem.s.abOpcode[offOpcode + 2],
3222 pVCpu->iem.s.abOpcode[offOpcode + 3],
3223 pVCpu->iem.s.abOpcode[offOpcode + 4],
3224 pVCpu->iem.s.abOpcode[offOpcode + 5],
3225 pVCpu->iem.s.abOpcode[offOpcode + 6],
3226 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3227# endif
3228 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3229 return VINF_SUCCESS;
3230 }
3231 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3232}
3233
3234#else /* IEM_WITH_SETJMP */
3235
3236/**
3237 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3238 *
3239 * @returns The opcode qword.
3240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3241 */
3242DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3243{
3244# ifdef IEM_WITH_CODE_TLB
3245 uint64_t u64;
3246 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3247 return u64;
3248# else
3249 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3250 if (rcStrict == VINF_SUCCESS)
3251 {
3252 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3253 pVCpu->iem.s.offOpcode = offOpcode + 8;
3254# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3255 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3256# else
3257 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3258 pVCpu->iem.s.abOpcode[offOpcode + 1],
3259 pVCpu->iem.s.abOpcode[offOpcode + 2],
3260 pVCpu->iem.s.abOpcode[offOpcode + 3],
3261 pVCpu->iem.s.abOpcode[offOpcode + 4],
3262 pVCpu->iem.s.abOpcode[offOpcode + 5],
3263 pVCpu->iem.s.abOpcode[offOpcode + 6],
3264 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3265# endif
3266 }
3267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3268# endif
3269}
3270
3271
3272/**
3273 * Fetches the next opcode qword, longjmp on error.
3274 *
3275 * @returns The opcode qword.
3276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3277 */
3278DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3279{
3280# ifdef IEM_WITH_CODE_TLB
3281 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3282 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3283 if (RT_LIKELY( pbBuf != NULL
3284 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3285 {
3286 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3287# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3288 return *(uint64_t const *)&pbBuf[offBuf];
3289# else
3290 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3291 pbBuf[offBuf + 1],
3292 pbBuf[offBuf + 2],
3293 pbBuf[offBuf + 3],
3294 pbBuf[offBuf + 4],
3295 pbBuf[offBuf + 5],
3296 pbBuf[offBuf + 6],
3297 pbBuf[offBuf + 7]);
3298# endif
3299 }
3300# else
3301 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3302 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3303 {
3304 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3305# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3306 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3307# else
3308 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3309 pVCpu->iem.s.abOpcode[offOpcode + 1],
3310 pVCpu->iem.s.abOpcode[offOpcode + 2],
3311 pVCpu->iem.s.abOpcode[offOpcode + 3],
3312 pVCpu->iem.s.abOpcode[offOpcode + 4],
3313 pVCpu->iem.s.abOpcode[offOpcode + 5],
3314 pVCpu->iem.s.abOpcode[offOpcode + 6],
3315 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3316# endif
3317 }
3318# endif
3319 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3320}
3321
3322#endif /* IEM_WITH_SETJMP */
3323
3324/**
3325 * Fetches the next opcode quad word, returns automatically on failure.
3326 *
3327 * @param a_pu64 Where to return the opcode quad word.
3328 * @remark Implicitly references pVCpu.
3329 */
3330#ifndef IEM_WITH_SETJMP
3331# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3332 do \
3333 { \
3334 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3335 if (rcStrict2 != VINF_SUCCESS) \
3336 return rcStrict2; \
3337 } while (0)
3338#else
3339# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3340#endif
3341
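/*
 * Illustrative sketch (not compiled): roughly how the IEM_OPCODE_GET_NEXT_* macros above
 * are meant to be used by a decoder function.  The helper name and the operand layout are
 * made up for illustration only; the macros implicitly reference the local pVCpu and either
 * return the strict status code (non-setjmp build) or longjmp (setjmp build) on failure.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeImms(PVMCPU pVCpu)
{
    uint32_t u32Disp;   /* hypothetical 32-bit displacement */
    uint64_t u64Imm;    /* hypothetical immediate, sign extended from 32 bits */
    IEM_OPCODE_GET_NEXT_U32(&u32Disp);        /* plain dword fetch */
    IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);  /* dword fetch, sign extended to 64 bits */
    /* ... dispatch the actual operation using u32Disp and u64Imm ... */
    RT_NOREF2(u32Disp, u64Imm);
    return VINF_SUCCESS;
}
#endif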
3342
3343/** @name Misc Worker Functions.
3344 * @{
3345 */
3346
3347/**
3348 * Gets the exception class for the specified exception vector.
3349 *
3350 * @returns The class of the specified exception.
3351 * @param uVector The exception vector.
3352 */
3353IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3354{
3355 Assert(uVector <= X86_XCPT_LAST);
3356 switch (uVector)
3357 {
3358 case X86_XCPT_DE:
3359 case X86_XCPT_TS:
3360 case X86_XCPT_NP:
3361 case X86_XCPT_SS:
3362 case X86_XCPT_GP:
3363 case X86_XCPT_SX: /* AMD only */
3364 return IEMXCPTCLASS_CONTRIBUTORY;
3365
3366 case X86_XCPT_PF:
3367 case X86_XCPT_VE: /* Intel only */
3368 return IEMXCPTCLASS_PAGE_FAULT;
3369
3370 case X86_XCPT_DF:
3371 return IEMXCPTCLASS_DOUBLE_FAULT;
3372 }
3373 return IEMXCPTCLASS_BENIGN;
3374}
3375
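/*
 * For reference, the exception classes above combine according to the architectural
 * double-fault rules (Intel SDM "Conditions for Generating a Double Fault", AMD APM),
 * which is what IEMEvaluateRecursiveXcpt below implements:
 *
 *      first event \ second xcpt:   benign     contributory   page fault
 *      benign                       serial     serial         serial
 *      contributory                 serial     #DF            serial
 *      page fault                   serial     #DF            #DF
 *      double fault                 serial     triple fault   triple fault
 *
 * ("serial" = the second exception is simply delivered as the current exception.)
 */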
3376
3377/**
3378 * Evaluates how to handle an exception caused during delivery of another event
3379 * (exception / interrupt).
3380 *
3381 * @returns How to handle the recursive exception.
3382 * @param pVCpu The cross context virtual CPU structure of the
3383 * calling thread.
3384 * @param fPrevFlags The flags of the previous event.
3385 * @param uPrevVector The vector of the previous event.
3386 * @param fCurFlags The flags of the current exception.
3387 * @param uCurVector The vector of the current exception.
3388 * @param pfXcptRaiseInfo Where to store additional information about the
3389 * exception condition. Optional.
3390 */
3391VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3392 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3393{
3394 /*
3395 * Only CPU exceptions can be raised while delivering other events; software interrupt
3396 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3397 */
3398 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3399 Assert(pVCpu); RT_NOREF(pVCpu);
3400 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3401
3402 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3403 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3404 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3405 {
3406 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3407 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3408 {
3409 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3410 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3411 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3412 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3413 {
3414 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3415 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3416 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3417 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3418 uCurVector, pVCpu->cpum.GstCtx.cr2));
3419 }
3420 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3421 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3422 {
3423 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3424 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3425 }
3426 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3427 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3428 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3429 {
3430 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3431 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3432 }
3433 }
3434 else
3435 {
3436 if (uPrevVector == X86_XCPT_NMI)
3437 {
3438 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3439 if (uCurVector == X86_XCPT_PF)
3440 {
3441 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3442 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3443 }
3444 }
3445 else if ( uPrevVector == X86_XCPT_AC
3446 && uCurVector == X86_XCPT_AC)
3447 {
3448 enmRaise = IEMXCPTRAISE_CPU_HANG;
3449 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3450 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3451 }
3452 }
3453 }
3454 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3455 {
3456 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3457 if (uCurVector == X86_XCPT_PF)
3458 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3459 }
3460 else
3461 {
3462 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3463 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3464 }
3465
3466 if (pfXcptRaiseInfo)
3467 *pfXcptRaiseInfo = fRaiseInfo;
3468 return enmRaise;
3469}
3470
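/*
 * Note for callers (illustrative summary; the exception dispatching code is authoritative):
 *      IEMXCPTRAISE_CURRENT_XCPT - deliver the new (second) exception as normal.
 *      IEMXCPTRAISE_DOUBLE_FAULT - raise #DF (error code 0) instead of the second exception.
 *      IEMXCPTRAISE_TRIPLE_FAULT - unrecoverable, see iemInitiateCpuShutdown below.
 *      IEMXCPTRAISE_CPU_HANG     - the CPU is considered wedged (e.g. recursive #AC).
 * The nested VMX/SVM code additionally consults the IEMXCPTRAISEINFO flags (e.g. the
 * vectoring #PF cases) when deciding how to reflect the event.
 */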
3471
3472/**
3473 * Enters the CPU shutdown state initiated by a triple fault or other
3474 * unrecoverable conditions.
3475 *
3476 * @returns Strict VBox status code.
3477 * @param pVCpu The cross context virtual CPU structure of the
3478 * calling thread.
3479 */
3480IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3481{
3482 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3483 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3484
3485 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3486 {
3487 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3488 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3489 }
3490
3491 RT_NOREF(pVCpu);
3492 return VINF_EM_TRIPLE_FAULT;
3493}
3494
3495
3496/**
3497 * Validates a new SS segment.
3498 *
3499 * @returns VBox strict status code.
3500 * @param pVCpu The cross context virtual CPU structure of the
3501 * calling thread.
3502 * @param NewSS The new SS selector.
3503 * @param uCpl The CPL to load the stack for.
3504 * @param pDesc Where to return the descriptor.
3505 */
3506IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3507{
3508 /* Null selectors are not allowed (we're not called for dispatching
3509 interrupts with SS=0 in long mode). */
3510 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3511 {
3512 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3513 return iemRaiseTaskSwitchFault0(pVCpu);
3514 }
3515
3516 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3517 if ((NewSS & X86_SEL_RPL) != uCpl)
3518 {
3519 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3520 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3521 }
3522
3523 /*
3524 * Read the descriptor.
3525 */
3526 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529
3530 /*
3531 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3532 */
3533 if (!pDesc->Legacy.Gen.u1DescType)
3534 {
3535 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3536 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3537 }
3538
3539 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3540 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3541 {
3542 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3543 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3544 }
3545 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3546 {
3547 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3548 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3549 }
3550
3551 /* Is it there? */
3552 /** @todo testcase: Is this checked before the canonical / limit check below? */
3553 if (!pDesc->Legacy.Gen.u1Present)
3554 {
3555 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3556 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3557 }
3558
3559 return VINF_SUCCESS;
3560}
3561
3562
3563/**
3564 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3565 * not.
3566 *
3567 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3568 */
3569#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3570# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3571#else
3572# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3573#endif
3574
3575/**
3576 * Updates the EFLAGS in the correct manner wrt. PATM.
3577 *
3578 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3579 * @param a_fEfl The new EFLAGS.
3580 */
3581#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3582# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3583#else
3584# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3585#endif
3586
3587
3588/** @} */
3589
3590/** @name Raising Exceptions.
3591 *
3592 * @{
3593 */
3594
3595
3596/**
3597 * Loads the specified stack far pointer from the TSS.
3598 *
3599 * @returns VBox strict status code.
3600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3601 * @param uCpl The CPL to load the stack for.
3602 * @param pSelSS Where to return the new stack segment.
3603 * @param puEsp Where to return the new stack pointer.
3604 */
3605IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3606{
3607 VBOXSTRICTRC rcStrict;
3608 Assert(uCpl < 4);
3609
3610 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3611 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3612 {
3613 /*
3614 * 16-bit TSS (X86TSS16).
3615 */
3616 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3617 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3618 {
3619 uint32_t off = uCpl * 4 + 2;
3620 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3621 {
3622 /** @todo check actual access pattern here. */
3623 uint32_t u32Tmp = 0; /* gcc maybe... */
3624 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3625 if (rcStrict == VINF_SUCCESS)
3626 {
3627 *puEsp = RT_LOWORD(u32Tmp);
3628 *pSelSS = RT_HIWORD(u32Tmp);
3629 return VINF_SUCCESS;
3630 }
3631 }
3632 else
3633 {
3634 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3635 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3636 }
3637 break;
3638 }
3639
3640 /*
3641 * 32-bit TSS (X86TSS32).
3642 */
3643 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3644 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3645 {
3646 uint32_t off = uCpl * 8 + 4;
3647 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3648 {
3649/** @todo check actual access pattern here. */
3650 uint64_t u64Tmp;
3651 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3652 if (rcStrict == VINF_SUCCESS)
3653 {
3654 *puEsp = u64Tmp & UINT32_MAX;
3655 *pSelSS = (RTSEL)(u64Tmp >> 32);
3656 return VINF_SUCCESS;
3657 }
3658 }
3659 else
3660 {
3661 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3662 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3663 }
3664 break;
3665 }
3666
3667 default:
3668 AssertFailed();
3669 rcStrict = VERR_IEM_IPE_4;
3670 break;
3671 }
3672
3673 *puEsp = 0; /* make gcc happy */
3674 *pSelSS = 0; /* make gcc happy */
3675 return rcStrict;
3676}
3677
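/*
 * For reference, the stack fields fetched above sit at these TSS offsets:
 *      16-bit TSS: SP0:SS0 at 0x02, SP1:SS1 at 0x06, SP2:SS2 at 0x0a   (i.e. uCpl * 4 + 2).
 *      32-bit TSS: ESP0:SS0 at 0x04, ESP1:SS1 at 0x0c, ESP2:SS2 at 0x14 (i.e. uCpl * 8 + 4).
 */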
3678
3679/**
3680 * Loads the specified stack pointer from the 64-bit TSS.
3681 *
3682 * @returns VBox strict status code.
3683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3684 * @param uCpl The CPL to load the stack for.
3685 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3686 * @param puRsp Where to return the new stack pointer.
3687 */
3688IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3689{
3690 Assert(uCpl < 4);
3691 Assert(uIst < 8);
3692 *puRsp = 0; /* make gcc happy */
3693
3694 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3695 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3696
3697 uint32_t off;
3698 if (uIst)
3699 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3700 else
3701 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3702 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3703 {
3704 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3705 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3706 }
3707
3708 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3709}
3710
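/*
 * For reference, the 64-bit TSS (X86TSS64) fields used above: RSP0..RSP2 start at offset
 * 0x04 and IST1..IST7 at offset 0x24, each entry being 8 bytes.  A non-zero uIst selects
 * an IST entry, otherwise the RSP entry for the target CPL is used.
 */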
3711
3712/**
3713 * Adjust the CPU state according to the exception being raised.
3714 *
3715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3716 * @param u8Vector The exception that has been raised.
3717 */
3718DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3719{
3720 switch (u8Vector)
3721 {
3722 case X86_XCPT_DB:
3723 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3724 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3725 break;
3726 /** @todo Read the AMD and Intel exception reference... */
3727 }
3728}
3729
3730
3731/**
3732 * Implements exceptions and interrupts for real mode.
3733 *
3734 * @returns VBox strict status code.
3735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3736 * @param cbInstr The number of bytes to offset rIP by in the return
3737 * address.
3738 * @param u8Vector The interrupt / exception vector number.
3739 * @param fFlags The flags.
3740 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3741 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3742 */
3743IEM_STATIC VBOXSTRICTRC
3744iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3745 uint8_t cbInstr,
3746 uint8_t u8Vector,
3747 uint32_t fFlags,
3748 uint16_t uErr,
3749 uint64_t uCr2)
3750{
3751 NOREF(uErr); NOREF(uCr2);
3752 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3753
3754 /*
3755 * Read the IDT entry.
3756 */
3757 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3758 {
3759 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3760 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3761 }
3762 RTFAR16 Idte;
3763 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3764 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3765 {
3766 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3767 return rcStrict;
3768 }
3769
3770 /*
3771 * Push the stack frame.
3772 */
3773 uint16_t *pu16Frame;
3774 uint64_t uNewRsp;
3775 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3776 if (rcStrict != VINF_SUCCESS)
3777 return rcStrict;
3778
3779 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3780#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3781 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3782 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3783 fEfl |= UINT16_C(0xf000);
3784#endif
3785 pu16Frame[2] = (uint16_t)fEfl;
3786 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3787 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3788 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3789 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3790 return rcStrict;
3791
3792 /*
3793 * Load the vector address into cs:ip and make exception specific state
3794 * adjustments.
3795 */
3796 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3797 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3798 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3799 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3800 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3801 pVCpu->cpum.GstCtx.rip = Idte.off;
3802 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3803 IEMMISC_SET_EFL(pVCpu, fEfl);
3804
3805 /** @todo do we actually do this in real mode? */
3806 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3807 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3808
3809 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3810}
3811
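/*
 * For reference: in real mode each IVT entry is 4 bytes (offset:segment, hence the
 * UINT32_C(4) * u8Vector indexing above), and the frame pushed is FLAGS, CS, IP (6 bytes).
 * Only for software interrupts does the pushed IP point past the instruction (cbInstr added).
 */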
3812
3813/**
3814 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3815 *
3816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3817 * @param pSReg Pointer to the segment register.
3818 */
3819IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3820{
3821 pSReg->Sel = 0;
3822 pSReg->ValidSel = 0;
3823 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3824 {
3825 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3826 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3827 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3828 }
3829 else
3830 {
3831 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3832 /** @todo check this on AMD-V */
3833 pSReg->u64Base = 0;
3834 pSReg->u32Limit = 0;
3835 }
3836}
3837
3838
3839/**
3840 * Loads a segment selector during a task switch in V8086 mode.
3841 *
3842 * @param pSReg Pointer to the segment register.
3843 * @param uSel The selector value to load.
3844 */
3845IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3846{
3847 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3848 pSReg->Sel = uSel;
3849 pSReg->ValidSel = uSel;
3850 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3851 pSReg->u64Base = uSel << 4;
3852 pSReg->u32Limit = 0xffff;
3853 pSReg->Attr.u = 0xf3;
3854}
3855
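/*
 * Note: the 0xf3 attribute value used above decodes to P=1, DPL=3, S=1 and type 3
 * (read/write data, accessed), i.e. the fixed attributes of V8086 mode segments.
 */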
3856
3857/**
3858 * Loads a NULL data selector into a selector register, both the hidden and
3859 * visible parts, in protected mode.
3860 *
3861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3862 * @param pSReg Pointer to the segment register.
3863 * @param uRpl The RPL.
3864 */
3865IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3866{
3867 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3868 * data selector in protected mode. */
3869 pSReg->Sel = uRpl;
3870 pSReg->ValidSel = uRpl;
3871 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3872 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3873 {
3874 /* VT-x (Intel 3960x) was observed doing something like this. */
3875 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3876 pSReg->u32Limit = UINT32_MAX;
3877 pSReg->u64Base = 0;
3878 }
3879 else
3880 {
3881 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3882 pSReg->u32Limit = 0;
3883 pSReg->u64Base = 0;
3884 }
3885}
3886
3887
3888/**
3889 * Loads a segment selector during a task switch in protected mode.
3890 *
3891 * In this task switch scenario, we would throw \#TS exceptions rather than
3892 * \#GPs.
3893 *
3894 * @returns VBox strict status code.
3895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3896 * @param pSReg Pointer to the segment register.
3897 * @param uSel The new selector value.
3898 *
3899 * @remarks This does _not_ handle CS or SS.
3900 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3901 */
3902IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3903{
3904 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3905
3906 /* Null data selector. */
3907 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3908 {
3909 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3910 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3911 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3912 return VINF_SUCCESS;
3913 }
3914
3915 /* Fetch the descriptor. */
3916 IEMSELDESC Desc;
3917 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3918 if (rcStrict != VINF_SUCCESS)
3919 {
3920 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3921 VBOXSTRICTRC_VAL(rcStrict)));
3922 return rcStrict;
3923 }
3924
3925 /* Must be a data segment or readable code segment. */
3926 if ( !Desc.Legacy.Gen.u1DescType
3927 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3928 {
3929 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3930 Desc.Legacy.Gen.u4Type));
3931 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3932 }
3933
3934 /* Check privileges for data segments and non-conforming code segments. */
3935 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3936 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3937 {
3938 /* The RPL and the new CPL must be less than or equal to the DPL. */
3939 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3940 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3941 {
3942 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3943 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3944 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3945 }
3946 }
3947
3948 /* Is it there? */
3949 if (!Desc.Legacy.Gen.u1Present)
3950 {
3951 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3952 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3953 }
3954
3955 /* The base and limit. */
3956 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3957 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3958
3959 /*
3960 * Ok, everything checked out fine. Now set the accessed bit before
3961 * committing the result into the registers.
3962 */
3963 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3964 {
3965 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3966 if (rcStrict != VINF_SUCCESS)
3967 return rcStrict;
3968 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3969 }
3970
3971 /* Commit */
3972 pSReg->Sel = uSel;
3973 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3974 pSReg->u32Limit = cbLimit;
3975 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3976 pSReg->ValidSel = uSel;
3977 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3978 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3979 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3980
3981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3982 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3983 return VINF_SUCCESS;
3984}
3985
3986
3987/**
3988 * Performs a task switch.
3989 *
3990 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3991 * caller is responsible for performing the necessary checks (like DPL, TSS
3992 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3993 * reference for JMP, CALL, IRET.
3994 *
3995 * If the task switch is due to a software interrupt or hardware exception,
3996 * the caller is responsible for validating the TSS selector and descriptor. See
3997 * Intel Instruction reference for INT n.
3998 *
3999 * @returns VBox strict status code.
4000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4001 * @param enmTaskSwitch The cause of the task switch.
4002 * @param uNextEip The EIP effective after the task switch.
4003 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4004 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4005 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4006 * @param SelTSS The TSS selector of the new task.
4007 * @param pNewDescTSS Pointer to the new TSS descriptor.
4008 */
4009IEM_STATIC VBOXSTRICTRC
4010iemTaskSwitch(PVMCPU pVCpu,
4011 IEMTASKSWITCH enmTaskSwitch,
4012 uint32_t uNextEip,
4013 uint32_t fFlags,
4014 uint16_t uErr,
4015 uint64_t uCr2,
4016 RTSEL SelTSS,
4017 PIEMSELDESC pNewDescTSS)
4018{
4019 Assert(!IEM_IS_REAL_MODE(pVCpu));
4020 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4021 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4022
4023 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4024 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4025 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4026 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4027 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4028
4029 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4030 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4031
4032 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4033 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4034
4035 /* Update CR2 in case it's a page-fault. */
4036 /** @todo This should probably be done much earlier in IEM/PGM. See
4037 * @bugref{5653#c49}. */
4038 if (fFlags & IEM_XCPT_FLAGS_CR2)
4039 pVCpu->cpum.GstCtx.cr2 = uCr2;
4040
4041 /*
4042 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4043 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4044 */
4045 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4046 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4047 if (uNewTSSLimit < uNewTSSLimitMin)
4048 {
4049 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4050 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4051 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4052 }
4053
4054 /*
4055 * Task switches in VMX non-root mode always cause task-switch VM-exits.
4056 * The new TSS must have been read and validated (DPL, limits etc.) before a
4057 * task-switch VM-exit commences.
4058 *
4059 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4060 */
4061 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4062 {
4063 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4064 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4065 }
4066
4067 /*
4068 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4069 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4070 */
4071 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4072 {
4073 uint32_t const uExitInfo1 = SelTSS;
4074 uint32_t uExitInfo2 = uErr;
4075 switch (enmTaskSwitch)
4076 {
4077 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4078 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4079 default: break;
4080 }
4081 if (fFlags & IEM_XCPT_FLAGS_ERR)
4082 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4083 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4084 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4085
4086 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4087 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4088 RT_NOREF2(uExitInfo1, uExitInfo2);
4089 }
4090
4091 /*
4092 * Check the current TSS limit. The last bytes written to the current TSS during the
4093 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4094 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4095 *
4096 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4097 * end up with smaller than "legal" TSS limits.
4098 */
4099 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4100 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4101 if (uCurTSSLimit < uCurTSSLimitMin)
4102 {
4103 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4104 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4105 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4106 }
4107
4108 /*
4109 * Verify that the new TSS can be accessed and map it. Map only the required contents
4110 * and not the entire TSS.
4111 */
4112 void *pvNewTSS;
4113 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4114 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4115 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4116 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4117 * not perform correct translation if this happens. See Intel spec. 7.2.1
4118 * "Task-State Segment" */
4119 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4120 if (rcStrict != VINF_SUCCESS)
4121 {
4122 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4123 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4124 return rcStrict;
4125 }
4126
4127 /*
4128 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4129 */
4130 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4131 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4132 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4133 {
4134 PX86DESC pDescCurTSS;
4135 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4136 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4137 if (rcStrict != VINF_SUCCESS)
4138 {
4139 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4140 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4141 return rcStrict;
4142 }
4143
4144 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4145 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4149 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152
4153 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4154 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4155 {
4156 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4157 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4158 u32EFlags &= ~X86_EFL_NT;
4159 }
4160 }
4161
4162 /*
4163 * Save the CPU state into the current TSS.
4164 */
4165 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4166 if (GCPtrNewTSS == GCPtrCurTSS)
4167 {
4168 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4169 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4170 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4171 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4172 pVCpu->cpum.GstCtx.ldtr.Sel));
4173 }
4174 if (fIsNewTSS386)
4175 {
4176 * Verify that the current TSS (32-bit) can be accessed; we only map the minimum required size.
4177 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4178 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4179 */
4180 void *pvCurTSS32;
4181 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4182 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4183 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4184 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4185 if (rcStrict != VINF_SUCCESS)
4186 {
4187 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4188 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4189 return rcStrict;
4190 }
4191
4192 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4193 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4194 pCurTSS32->eip = uNextEip;
4195 pCurTSS32->eflags = u32EFlags;
4196 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4197 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4198 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4199 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4200 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4201 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4202 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4203 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4204 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4205 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4206 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4207 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4208 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4209 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4210
4211 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4212 if (rcStrict != VINF_SUCCESS)
4213 {
4214 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4215 VBOXSTRICTRC_VAL(rcStrict)));
4216 return rcStrict;
4217 }
4218 }
4219 else
4220 {
4221 /*
4222 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4223 */
4224 void *pvCurTSS16;
4225 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4226 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4227 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4228 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4229 if (rcStrict != VINF_SUCCESS)
4230 {
4231 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4232 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4233 return rcStrict;
4234 }
4235
4236 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4237 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4238 pCurTSS16->ip = uNextEip;
4239 pCurTSS16->flags = u32EFlags;
4240 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4241 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4242 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4243 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4244 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4245 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4246 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4247 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4248 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4249 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4250 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4251 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4252
4253 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4254 if (rcStrict != VINF_SUCCESS)
4255 {
4256 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4257 VBOXSTRICTRC_VAL(rcStrict)));
4258 return rcStrict;
4259 }
4260 }
4261
4262 /*
4263 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4264 */
4265 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4266 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4267 {
4268 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4269 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4270 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4271 }
4272
4273 /*
4274 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4275 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4276 */
4277 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4278 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4279 bool fNewDebugTrap;
4280 if (fIsNewTSS386)
4281 {
4282 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4283 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4284 uNewEip = pNewTSS32->eip;
4285 uNewEflags = pNewTSS32->eflags;
4286 uNewEax = pNewTSS32->eax;
4287 uNewEcx = pNewTSS32->ecx;
4288 uNewEdx = pNewTSS32->edx;
4289 uNewEbx = pNewTSS32->ebx;
4290 uNewEsp = pNewTSS32->esp;
4291 uNewEbp = pNewTSS32->ebp;
4292 uNewEsi = pNewTSS32->esi;
4293 uNewEdi = pNewTSS32->edi;
4294 uNewES = pNewTSS32->es;
4295 uNewCS = pNewTSS32->cs;
4296 uNewSS = pNewTSS32->ss;
4297 uNewDS = pNewTSS32->ds;
4298 uNewFS = pNewTSS32->fs;
4299 uNewGS = pNewTSS32->gs;
4300 uNewLdt = pNewTSS32->selLdt;
4301 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4302 }
4303 else
4304 {
4305 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4306 uNewCr3 = 0;
4307 uNewEip = pNewTSS16->ip;
4308 uNewEflags = pNewTSS16->flags;
4309 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4310 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4311 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4312 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4313 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4314 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4315 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4316 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4317 uNewES = pNewTSS16->es;
4318 uNewCS = pNewTSS16->cs;
4319 uNewSS = pNewTSS16->ss;
4320 uNewDS = pNewTSS16->ds;
4321 uNewFS = 0;
4322 uNewGS = 0;
4323 uNewLdt = pNewTSS16->selLdt;
4324 fNewDebugTrap = false;
4325 }
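/*
 * Note: fNewDebugTrap mirrors the T (debug trap) bit of a 32-bit TSS; architecturally a
 * set T bit causes a #DB trap once the task switch has completed.
 */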
4326
4327 if (GCPtrNewTSS == GCPtrCurTSS)
4328 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4329 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4330
4331 /*
4332 * We're done accessing the new TSS.
4333 */
4334 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4335 if (rcStrict != VINF_SUCCESS)
4336 {
4337 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4338 return rcStrict;
4339 }
4340
4341 /*
4342 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4343 */
4344 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4345 {
4346 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4347 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4348 if (rcStrict != VINF_SUCCESS)
4349 {
4350 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4351 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4352 return rcStrict;
4353 }
4354
4355 /* Check that the descriptor indicates the new TSS is available (not busy). */
4356 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4357 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4358 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4359
4360 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4361 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4362 if (rcStrict != VINF_SUCCESS)
4363 {
4364 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4365 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4366 return rcStrict;
4367 }
4368 }
4369
4370 /*
4371 * From this point on, we're technically in the new task. We will defer exceptions
4372 * until the completion of the task switch but before executing any instructions in the new task.
4373 */
4374 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4375 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4376 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4377 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4378 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4379 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4380 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4381
4382 /* Set the busy bit in TR. */
4383 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4384 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4385 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4386 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4387 {
4388 uNewEflags |= X86_EFL_NT;
4389 }
4390
4391 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4392 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4393 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4394
4395 pVCpu->cpum.GstCtx.eip = uNewEip;
4396 pVCpu->cpum.GstCtx.eax = uNewEax;
4397 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4398 pVCpu->cpum.GstCtx.edx = uNewEdx;
4399 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4400 pVCpu->cpum.GstCtx.esp = uNewEsp;
4401 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4402 pVCpu->cpum.GstCtx.esi = uNewEsi;
4403 pVCpu->cpum.GstCtx.edi = uNewEdi;
4404
4405 uNewEflags &= X86_EFL_LIVE_MASK;
4406 uNewEflags |= X86_EFL_RA1_MASK;
4407 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4408
4409 /*
4410 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4411 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4412 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4413 */
4414 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4415 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4416
4417 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4418 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4419
4420 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4421 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4424 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4427 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4430 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4431 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4432
4433 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4434 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4435 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4436 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4437
4438 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4439 {
4440 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4441 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4442 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4443 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4444 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4445 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4446 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4447 }
4448
4449 /*
4450 * Switch CR3 for the new task.
4451 */
4452 if ( fIsNewTSS386
4453 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4454 {
4455 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4456 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4457 AssertRCSuccessReturn(rc, rc);
4458
4459 /* Inform PGM. */
4460 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4461 AssertRCReturn(rc, rc);
4462 /* ignore informational status codes */
4463
4464 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4465 }
4466
4467 /*
4468 * Switch LDTR for the new task.
4469 */
4470 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4471 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4472 else
4473 {
4474 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4475
4476 IEMSELDESC DescNewLdt;
4477 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4478 if (rcStrict != VINF_SUCCESS)
4479 {
4480 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4481 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4482 return rcStrict;
4483 }
4484 if ( !DescNewLdt.Legacy.Gen.u1Present
4485 || DescNewLdt.Legacy.Gen.u1DescType
4486 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4487 {
4488 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4489 uNewLdt, DescNewLdt.Legacy.u));
4490 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4491 }
4492
4493 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4494 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4495 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4496 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4497 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4498 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4499 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4501 }
4502
4503 IEMSELDESC DescSS;
4504 if (IEM_IS_V86_MODE(pVCpu))
4505 {
4506 pVCpu->iem.s.uCpl = 3;
4507 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4508 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4509 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4510 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4511 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4512 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4513
4514 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4515 DescSS.Legacy.u = 0;
4516 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4517 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4518 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4519 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4520 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4521 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4522 DescSS.Legacy.Gen.u2Dpl = 3;
4523 }
4524 else
4525 {
4526 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4527
4528 /*
4529 * Load the stack segment for the new task.
4530 */
4531 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4532 {
4533 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* Fetch the descriptor. */
4538 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4539 if (rcStrict != VINF_SUCCESS)
4540 {
4541 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4542 VBOXSTRICTRC_VAL(rcStrict)));
4543 return rcStrict;
4544 }
4545
4546 /* SS must be a data segment and writable. */
4547 if ( !DescSS.Legacy.Gen.u1DescType
4548 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4549 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4550 {
4551 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4552 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4553 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4554 }
4555
4556 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4557 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4558 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4559 {
4560 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4561 uNewCpl));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565 /* Is it there? */
4566 if (!DescSS.Legacy.Gen.u1Present)
4567 {
4568 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4569 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4570 }
4571
4572 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4573 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4574
4575 /* Set the accessed bit before committing the result into SS. */
4576 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4577 {
4578 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4579 if (rcStrict != VINF_SUCCESS)
4580 return rcStrict;
4581 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4582 }
4583
4584 /* Commit SS. */
4585 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4586 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4587 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4588 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4589 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4590 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4591 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4592
4593 /* CPL has changed, update IEM before loading rest of segments. */
4594 pVCpu->iem.s.uCpl = uNewCpl;
4595
4596 /*
4597 * Load the data segments for the new task.
4598 */
4599 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4600 if (rcStrict != VINF_SUCCESS)
4601 return rcStrict;
4602 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4603 if (rcStrict != VINF_SUCCESS)
4604 return rcStrict;
4605 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611
4612 /*
4613 * Load the code segment for the new task.
4614 */
4615 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4616 {
4617 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4618 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4619 }
4620
4621 /* Fetch the descriptor. */
4622 IEMSELDESC DescCS;
4623 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4624 if (rcStrict != VINF_SUCCESS)
4625 {
4626 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4627 return rcStrict;
4628 }
4629
4630 /* CS must be a code segment. */
4631 if ( !DescCS.Legacy.Gen.u1DescType
4632 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4633 {
4634 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4635 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4636 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4637 }
4638
4639 /* For conforming CS, DPL must be less than or equal to the RPL. */
4640 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4641 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4642 {
4643 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4644 DescCS.Legacy.Gen.u2Dpl));
4645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4646 }
4647
4648 /* For non-conforming CS, DPL must match RPL. */
4649 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4650 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4651 {
4652 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4653 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4655 }
4656
4657 /* Is it there? */
4658 if (!DescCS.Legacy.Gen.u1Present)
4659 {
4660 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4661 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4662 }
4663
4664 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4665 u64Base = X86DESC_BASE(&DescCS.Legacy);
4666
4667 /* Set the accessed bit before committing the result into CS. */
4668 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4669 {
4670 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4671 if (rcStrict != VINF_SUCCESS)
4672 return rcStrict;
4673 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4674 }
4675
4676 /* Commit CS. */
4677 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4678 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4679 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4680 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4681 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4682 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4683 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4684 }
4685
4686 /** @todo Debug trap. */
4687 if (fIsNewTSS386 && fNewDebugTrap)
4688 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4689
4690 /*
4691 * Construct the error code masks based on what caused this task switch.
4692 * See Intel Instruction reference for INT.
4693 */
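    /* Note: uExt ends up as the EXT bit (bit 0) of the error code for any #SS or
       #GP raised while pushing the error code below, marking the fault as being
       caused by an external event. */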
4694 uint16_t uExt;
4695 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4696 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4697 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4698 {
4699 uExt = 1;
4700 }
4701 else
4702 uExt = 0;
4703
4704 /*
4705 * Push any error code on to the new stack.
4706 */
4707 if (fFlags & IEM_XCPT_FLAGS_ERR)
4708 {
4709 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4710 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4711 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4712
4713 /* Check that there is sufficient space on the stack. */
4714 /** @todo Factor out segment limit checking for normal/expand down segments
4715 * into a separate function. */
4716 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4717 {
4718 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4719 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4720 {
4721 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4722 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4723 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4724 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4725 }
4726 }
4727 else
4728 {
4729 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4730 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4731 {
4732 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4733 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4734 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4735 }
4736 }
4737
4738
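    /* The error code is pushed onto the new task's stack (using the SS:ESP loaded
       above), in the width dictated by the new TSS format. */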
4739 if (fIsNewTSS386)
4740 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4741 else
4742 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4743 if (rcStrict != VINF_SUCCESS)
4744 {
4745 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4746 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4747 return rcStrict;
4748 }
4749 }
4750
4751 /* Check the new EIP against the new CS limit. */
4752 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4753 {
4754 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4755 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4756 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4757 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4758 }
4759
4760 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4761 pVCpu->cpum.GstCtx.ss.Sel));
4762 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4763}
4764
4765
4766/**
4767 * Implements exceptions and interrupts for protected mode.
4768 *
4769 * @returns VBox strict status code.
4770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4771 * @param cbInstr The number of bytes to offset rIP by in the return
4772 * address.
4773 * @param u8Vector The interrupt / exception vector number.
4774 * @param fFlags The flags.
4775 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4776 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4777 */
4778IEM_STATIC VBOXSTRICTRC
4779iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4780 uint8_t cbInstr,
4781 uint8_t u8Vector,
4782 uint32_t fFlags,
4783 uint16_t uErr,
4784 uint64_t uCr2)
4785{
4786 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4787
4788 /*
4789 * Read the IDT entry.
4790 */
4791 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4792 {
4793 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4794 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4795 }
4796 X86DESC Idte;
4797 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4798 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4799 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4800 {
4801 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4802 return rcStrict;
4803 }
4804 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4805 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4806 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4807
4808 /*
4809 * Check the descriptor type, DPL and such.
4810 * ASSUMES this is done in the same order as described for call-gate calls.
4811 */
4812 if (Idte.Gate.u1DescType)
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4815 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4816 }
4817 bool fTaskGate = false;
4818 uint8_t f32BitGate = true;
4819 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4820 switch (Idte.Gate.u4Type)
4821 {
4822 case X86_SEL_TYPE_SYS_UNDEFINED:
4823 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4824 case X86_SEL_TYPE_SYS_LDT:
4825 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4826 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4827 case X86_SEL_TYPE_SYS_UNDEFINED2:
4828 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4829 case X86_SEL_TYPE_SYS_UNDEFINED3:
4830 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4831 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4832 case X86_SEL_TYPE_SYS_UNDEFINED4:
4833 {
4834 /** @todo check what actually happens when the type is wrong...
4835 * esp. call gates. */
4836 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4837 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4838 }
4839
4840 case X86_SEL_TYPE_SYS_286_INT_GATE:
4841 f32BitGate = false;
4842 RT_FALL_THRU();
4843 case X86_SEL_TYPE_SYS_386_INT_GATE:
4844 fEflToClear |= X86_EFL_IF;
4845 break;
4846
4847 case X86_SEL_TYPE_SYS_TASK_GATE:
4848 fTaskGate = true;
4849#ifndef IEM_IMPLEMENTS_TASKSWITCH
4850 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4851#endif
4852 break;
4853
4854 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4855 f32BitGate = false;
4856 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4857 break;
4858
4859 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4860 }
4861
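    /* At this point the gate is either an interrupt gate (IF will be cleared via
       fEflToClear), a trap gate (IF left alone), or a task gate (dispatched
       through a full task switch further down). */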
4862 /* Check DPL against CPL if applicable. */
4863 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4864 {
4865 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4866 {
4867 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4868 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4869 }
4870 }
4871
4872 /* Is it there? */
4873 if (!Idte.Gate.u1Present)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4876 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4877 }
4878
4879 /* Is it a task-gate? */
4880 if (fTaskGate)
4881 {
4882 /*
4883 * Construct the error code masks based on what caused this task switch.
4884 * See Intel Instruction reference for INT.
4885 */
4886 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4887 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4888 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4889 RTSEL SelTSS = Idte.Gate.u16Sel;
4890
4891 /*
4892 * Fetch the TSS descriptor in the GDT.
4893 */
4894 IEMSELDESC DescTSS;
4895 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4896 if (rcStrict != VINF_SUCCESS)
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4899 VBOXSTRICTRC_VAL(rcStrict)));
4900 return rcStrict;
4901 }
4902
4903 /* The TSS descriptor must be a system segment and be available (not busy). */
4904 if ( DescTSS.Legacy.Gen.u1DescType
4905 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4906 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4907 {
4908 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4909 u8Vector, SelTSS, DescTSS.Legacy.au64));
4910 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4911 }
4912
4913 /* The TSS must be present. */
4914 if (!DescTSS.Legacy.Gen.u1Present)
4915 {
4916 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4917 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4918 }
4919
4920 /* Do the actual task switch. */
4921 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4922 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4923 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4924 }
4925
4926 /* A null CS is bad. */
4927 RTSEL NewCS = Idte.Gate.u16Sel;
4928 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4929 {
4930 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4931 return iemRaiseGeneralProtectionFault0(pVCpu);
4932 }
4933
4934 /* Fetch the descriptor for the new CS. */
4935 IEMSELDESC DescCS;
4936 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4937 if (rcStrict != VINF_SUCCESS)
4938 {
4939 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4940 return rcStrict;
4941 }
4942
4943 /* Must be a code segment. */
4944 if (!DescCS.Legacy.Gen.u1DescType)
4945 {
4946 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4947 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4948 }
4949 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4950 {
4951 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4952 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4953 }
4954
4955 /* Don't allow lowering the privilege level. */
4956 /** @todo Does the lowering of privileges apply to software interrupts
4957 * only? This has bearings on the more-privileged or
4958 * same-privilege stack behavior further down. A testcase would
4959 * be nice. */
4960 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4961 {
4962 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4963 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4964 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4965 }
4966
4967 /* Make sure the selector is present. */
4968 if (!DescCS.Legacy.Gen.u1Present)
4969 {
4970 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4971 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4972 }
4973
4974 /* Check the new EIP against the new CS limit. */
4975 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4976 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4977 ? Idte.Gate.u16OffsetLow
4978 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
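    /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high
       offset words into a 32-bit EIP. */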
4979 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4980 if (uNewEip > cbLimitCS)
4981 {
4982 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4983 u8Vector, uNewEip, cbLimitCS, NewCS));
4984 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4985 }
4986 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4987
4988 /* Calc the flag image to push. */
4989 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4990 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4991 fEfl &= ~X86_EFL_RF;
4992 else
4993 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4994
4995 /* From V8086 mode only go to CPL 0. */
4996 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4997 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
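    /* A conforming handler CS keeps the current CPL; a non-conforming one runs
       at its own DPL. */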
4998 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4999 {
5000 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5001 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5002 }
5003
5004 /*
5005 * If the privilege level changes, we need to get a new stack from the TSS.
5006 * This in turns means validating the new SS and ESP...
5007 */
5008 if (uNewCpl != pVCpu->iem.s.uCpl)
5009 {
5010 RTSEL NewSS;
5011 uint32_t uNewEsp;
5012 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5013 if (rcStrict != VINF_SUCCESS)
5014 return rcStrict;
5015
5016 IEMSELDESC DescSS;
5017 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5021 if (!DescSS.Legacy.Gen.u1DefBig)
5022 {
5023 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5024 uNewEsp = (uint16_t)uNewEsp;
5025 }
5026
5027 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5028
5029 /* Check that there is sufficient space for the stack frame. */
5030 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5031 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5032 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5033 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
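        /* Frame layout: EIP, CS, EFLAGS, ESP, SS (5 entries), +1 entry for an error
           code, +4 entries (ES, DS, FS, GS) when interrupting V8086 code; each entry
           is 2 or 4 bytes depending on the gate width. */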
5034
5035 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5036 {
5037 if ( uNewEsp - 1 > cbLimitSS
5038 || uNewEsp < cbStackFrame)
5039 {
5040 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5041 u8Vector, NewSS, uNewEsp, cbStackFrame));
5042 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5043 }
5044 }
5045 else
5046 {
5047 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5048 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5049 {
5050 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5051 u8Vector, NewSS, uNewEsp, cbStackFrame));
5052 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5053 }
5054 }
5055
5056 /*
5057 * Start making changes.
5058 */
5059
5060 /* Set the new CPL so that stack accesses use it. */
5061 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5062 pVCpu->iem.s.uCpl = uNewCpl;
5063
5064 /* Create the stack frame. */
5065 RTPTRUNION uStackFrame;
5066 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5067 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5068 if (rcStrict != VINF_SUCCESS)
5069 return rcStrict;
5070 void * const pvStackFrame = uStackFrame.pv;
5071 if (f32BitGate)
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu32++ = uErr;
5075 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5076 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5077 uStackFrame.pu32[2] = fEfl;
5078 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5079 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5080 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5081 if (fEfl & X86_EFL_VM)
5082 {
5083 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5084 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5085 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5086 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5087 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5088 }
5089 }
5090 else
5091 {
5092 if (fFlags & IEM_XCPT_FLAGS_ERR)
5093 *uStackFrame.pu16++ = uErr;
5094 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5095 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5096 uStackFrame.pu16[2] = fEfl;
5097 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5098 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5099 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5100 if (fEfl & X86_EFL_VM)
5101 {
5102 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5103 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5104 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5105 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5106 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5107 }
5108 }
5109 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5110 if (rcStrict != VINF_SUCCESS)
5111 return rcStrict;
5112
5113 /* Mark the selectors 'accessed' (hope this is the correct time). */
5114 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5115 * after pushing the stack frame? (Write protect the gdt + stack to
5116 * find out.) */
5117 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5118 {
5119 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5120 if (rcStrict != VINF_SUCCESS)
5121 return rcStrict;
5122 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5123 }
5124
5125 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5126 {
5127 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5128 if (rcStrict != VINF_SUCCESS)
5129 return rcStrict;
5130 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5131 }
5132
5133 /*
5134 * Start committing the register changes (joins with the DPL=CPL branch).
5135 */
5136 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5137 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5138 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5139 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5140 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5141 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5142 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5143 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5144 * SP is loaded).
5145 * Need to check the other combinations too:
5146 * - 16-bit TSS, 32-bit handler
5147 * - 32-bit TSS, 16-bit handler */
5148 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5149 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5150 else
5151 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5152
5153 if (fEfl & X86_EFL_VM)
5154 {
5155 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5156 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5157 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5158 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5159 }
5160 }
5161 /*
5162 * Same privilege, no stack change and smaller stack frame.
5163 */
5164 else
5165 {
5166 uint64_t uNewRsp;
5167 RTPTRUNION uStackFrame;
5168 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5169 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5170 if (rcStrict != VINF_SUCCESS)
5171 return rcStrict;
5172 void * const pvStackFrame = uStackFrame.pv;
5173
5174 if (f32BitGate)
5175 {
5176 if (fFlags & IEM_XCPT_FLAGS_ERR)
5177 *uStackFrame.pu32++ = uErr;
5178 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5179 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5180 uStackFrame.pu32[2] = fEfl;
5181 }
5182 else
5183 {
5184 if (fFlags & IEM_XCPT_FLAGS_ERR)
5185 *uStackFrame.pu16++ = uErr;
5186 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5187 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5188 uStackFrame.pu16[2] = fEfl;
5189 }
5190 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5191 if (rcStrict != VINF_SUCCESS)
5192 return rcStrict;
5193
5194 /* Mark the CS selector as 'accessed'. */
5195 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5196 {
5197 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5198 if (rcStrict != VINF_SUCCESS)
5199 return rcStrict;
5200 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5201 }
5202
5203 /*
5204 * Start committing the register changes (joins with the other branch).
5205 */
5206 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5207 }
5208
5209 /* ... register committing continues. */
5210 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5211 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5212 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5213 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5214 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5215 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5216
5217 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5218 fEfl &= ~fEflToClear;
5219 IEMMISC_SET_EFL(pVCpu, fEfl);
5220
5221 if (fFlags & IEM_XCPT_FLAGS_CR2)
5222 pVCpu->cpum.GstCtx.cr2 = uCr2;
5223
5224 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5225 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5226
5227 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5228}
5229
5230
5231/**
5232 * Implements exceptions and interrupts for long mode.
5233 *
5234 * @returns VBox strict status code.
5235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5236 * @param cbInstr The number of bytes to offset rIP by in the return
5237 * address.
5238 * @param u8Vector The interrupt / exception vector number.
5239 * @param fFlags The flags.
5240 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5241 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5242 */
5243IEM_STATIC VBOXSTRICTRC
5244iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5245 uint8_t cbInstr,
5246 uint8_t u8Vector,
5247 uint32_t fFlags,
5248 uint16_t uErr,
5249 uint64_t uCr2)
5250{
5251 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5252
5253 /*
5254 * Read the IDT entry.
5255 */
5256 uint16_t offIdt = (uint16_t)u8Vector << 4;
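    /* Long mode IDT entries are 16 bytes each, hence vector * 16
       (e.g. vector 0x0e -> offset 0xe0). */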
5257 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5258 {
5259 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5260 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5261 }
5262 X86DESC64 Idte;
5263 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5264 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5265 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5266 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5267 {
5268 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5269 return rcStrict;
5270 }
5271 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5272 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5273 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5274
5275 /*
5276 * Check the descriptor type, DPL and such.
5277 * ASSUMES this is done in the same order as described for call-gate calls.
5278 */
5279 if (Idte.Gate.u1DescType)
5280 {
5281 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5282 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5283 }
5284 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5285 switch (Idte.Gate.u4Type)
5286 {
5287 case AMD64_SEL_TYPE_SYS_INT_GATE:
5288 fEflToClear |= X86_EFL_IF;
5289 break;
5290 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5291 break;
5292
5293 default:
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5295 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5296 }
5297
5298 /* Check DPL against CPL if applicable. */
5299 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5300 {
5301 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5302 {
5303 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5304 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5305 }
5306 }
5307
5308 /* Is it there? */
5309 if (!Idte.Gate.u1Present)
5310 {
5311 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5312 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5313 }
5314
5315 /* A null CS is bad. */
5316 RTSEL NewCS = Idte.Gate.u16Sel;
5317 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5318 {
5319 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5320 return iemRaiseGeneralProtectionFault0(pVCpu);
5321 }
5322
5323 /* Fetch the descriptor for the new CS. */
5324 IEMSELDESC DescCS;
5325 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5326 if (rcStrict != VINF_SUCCESS)
5327 {
5328 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5329 return rcStrict;
5330 }
5331
5332 /* Must be a 64-bit code segment. */
5333 if (!DescCS.Long.Gen.u1DescType)
5334 {
5335 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5336 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5337 }
5338 if ( !DescCS.Long.Gen.u1Long
5339 || DescCS.Long.Gen.u1DefBig
5340 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5341 {
5342 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5343 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5344 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5345 }
5346
5347 /* Don't allow lowering the privilege level. For non-conforming CS
5348 selectors, the CS.DPL sets the privilege level the trap/interrupt
5349 handler runs at. For conforming CS selectors, the CPL remains
5350 unchanged, but the CS.DPL must be <= CPL. */
5351 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5352 * when CPU in Ring-0. Result \#GP? */
5353 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5354 {
5355 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5356 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5357 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5358 }
5359
5360
5361 /* Make sure the selector is present. */
5362 if (!DescCS.Legacy.Gen.u1Present)
5363 {
5364 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5365 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5366 }
5367
5368 /* Check that the new RIP is canonical. */
5369 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5370 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5371 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5372 if (!IEM_IS_CANONICAL(uNewRip))
5373 {
5374 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5375 return iemRaiseGeneralProtectionFault0(pVCpu);
5376 }
5377
5378 /*
5379 * If the privilege level changes or if the IST isn't zero, we need to get
5380 * a new stack from the TSS.
5381 */
5382 uint64_t uNewRsp;
5383 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5384 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5385 if ( uNewCpl != pVCpu->iem.s.uCpl
5386 || Idte.Gate.u3IST != 0)
5387 {
5388 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5389 if (rcStrict != VINF_SUCCESS)
5390 return rcStrict;
5391 }
5392 else
5393 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5394 uNewRsp &= ~(uint64_t)0xf;
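    /* In 64-bit mode the new stack pointer is aligned down to a 16-byte boundary
       before the frame is pushed, whether or not the stack was switched. */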
5395
5396 /*
5397 * Calc the flag image to push.
5398 */
5399 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5400 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5401 fEfl &= ~X86_EFL_RF;
5402 else
5403 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5404
5405 /*
5406 * Start making changes.
5407 */
5408 /* Set the new CPL so that stack accesses use it. */
5409 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5410 pVCpu->iem.s.uCpl = uNewCpl;
5411
5412 /* Create the stack frame. */
5413 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
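    /* The 64-bit frame is always SS, RSP, RFLAGS, CS, RIP (5 qwords), plus an
       optional error code. */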
5414 RTPTRUNION uStackFrame;
5415 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5416 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5417 if (rcStrict != VINF_SUCCESS)
5418 return rcStrict;
5419 void * const pvStackFrame = uStackFrame.pv;
5420
5421 if (fFlags & IEM_XCPT_FLAGS_ERR)
5422 *uStackFrame.pu64++ = uErr;
5423 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5424 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5425 uStackFrame.pu64[2] = fEfl;
5426 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5427 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5428 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5429 if (rcStrict != VINF_SUCCESS)
5430 return rcStrict;
5431
5432 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5433 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5434 * after pushing the stack frame? (Write protect the gdt + stack to
5435 * find out.) */
5436 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5437 {
5438 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5439 if (rcStrict != VINF_SUCCESS)
5440 return rcStrict;
5441 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5442 }
5443
5444 /*
5445 * Start committing the register changes.
5446 */
5447 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5448 * hidden registers when interrupting 32-bit or 16-bit code! */
5449 if (uNewCpl != uOldCpl)
5450 {
5451 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5452 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5453 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5454 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5455 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5456 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5457 }
5458 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5459 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5460 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5461 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5462 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5463 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5464 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5465 pVCpu->cpum.GstCtx.rip = uNewRip;
5466
5467 fEfl &= ~fEflToClear;
5468 IEMMISC_SET_EFL(pVCpu, fEfl);
5469
5470 if (fFlags & IEM_XCPT_FLAGS_CR2)
5471 pVCpu->cpum.GstCtx.cr2 = uCr2;
5472
5473 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5474 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5475
5476 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5477}
5478
5479
5480/**
5481 * Implements exceptions and interrupts.
5482 *
5483 * All exceptions and interrupts go thru this function!
5484 *
5485 * @returns VBox strict status code.
5486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5487 * @param cbInstr The number of bytes to offset rIP by in the return
5488 * address.
5489 * @param u8Vector The interrupt / exception vector number.
5490 * @param fFlags The flags.
5491 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5492 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5493 */
5494DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5495iemRaiseXcptOrInt(PVMCPU pVCpu,
5496 uint8_t cbInstr,
5497 uint8_t u8Vector,
5498 uint32_t fFlags,
5499 uint16_t uErr,
5500 uint64_t uCr2)
5501{
5502 /*
5503 * Get all the state that we might need here.
5504 */
5505 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5506 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5507
5508#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5509 /*
5510 * Flush prefetch buffer
5511 */
5512 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5513#endif
5514
5515 /*
5516 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5517 */
5518 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5519 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5520 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5521 | IEM_XCPT_FLAGS_BP_INSTR
5522 | IEM_XCPT_FLAGS_ICEBP_INSTR
5523 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5524 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5525 {
5526 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5527 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5528 u8Vector = X86_XCPT_GP;
5529 uErr = 0;
5530 }
5531#ifdef DBGFTRACE_ENABLED
5532 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5533 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5534 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5535#endif
5536
5537 /*
5538 * Evaluate whether NMI blocking should be in effect.
5539 * Normally, NMI blocking is in effect whenever we inject an NMI.
5540 */
5541 bool fBlockNmi;
5542 if ( u8Vector == X86_XCPT_NMI
5543 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5544 fBlockNmi = true;
5545 else
5546 fBlockNmi = false;
5547
5548#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5549 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5550 {
5551 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5552 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5553 return rcStrict0;
5554
5555 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5556 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5557 {
5558 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5559 fBlockNmi = false;
5560 }
5561 }
5562#endif
5563
5564#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5565 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5566 {
5567 /*
5568 * If the event is being injected as part of VMRUN, it isn't subject to event
5569 * intercepts in the nested-guest. However, secondary exceptions that occur
5570 * during injection of any event -are- subject to exception intercepts.
5571 *
5572 * See AMD spec. 15.20 "Event Injection".
5573 */
5574 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5575 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5576 else
5577 {
5578 /*
5579 * Check and handle if the event being raised is intercepted.
5580 */
5581 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5582 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5583 return rcStrict0;
5584 }
5585 }
5586#endif
5587
5588 /*
5589 * Set NMI blocking if necessary.
5590 */
5591 if ( fBlockNmi
5592 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5593 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5594
5595 /*
5596 * Do recursion accounting.
5597 */
5598 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5599 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5600 if (pVCpu->iem.s.cXcptRecursions == 0)
5601 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5602 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5603 else
5604 {
5605 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5606 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5607 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5608
5609 if (pVCpu->iem.s.cXcptRecursions >= 4)
5610 {
5611#ifdef DEBUG_bird
5612 AssertFailed();
5613#endif
5614 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5615 }
5616
5617 /*
5618 * Evaluate the sequence of recurring events.
5619 */
5620 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5621 NULL /* pXcptRaiseInfo */);
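        /* The verdict is one of: deliver the current event as-is, promote it to a
           double fault, escalate to a triple fault (CPU shutdown), or flag a CPU
           hang condition. */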
5622 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5623 { /* likely */ }
5624 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5625 {
5626 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5627 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5628 u8Vector = X86_XCPT_DF;
5629 uErr = 0;
5630#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5631 /* VMX nested-guest #DF intercept needs to be checked here. */
5632 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5633 {
5634 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5635 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5636 return rcStrict0;
5637 }
5638#endif
5639 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5640 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5641 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5642 }
5643 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5644 {
5645 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5646 return iemInitiateCpuShutdown(pVCpu);
5647 }
5648 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5649 {
5650 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5651 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5652 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5653 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5654 return VERR_EM_GUEST_CPU_HANG;
5655 }
5656 else
5657 {
5658 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5659 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5660 return VERR_IEM_IPE_9;
5661 }
5662
5663 /*
5664 * The 'EXT' bit is set when an exception occurs during delivery of an external
5665 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5666 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5667 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5668 *
5669 * [1] - Intel spec. 6.13 "Error Code"
5670 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5671 * [3] - Intel Instruction reference for INT n.
5672 */
5673 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5674 && (fFlags & IEM_XCPT_FLAGS_ERR)
5675 && u8Vector != X86_XCPT_PF
5676 && u8Vector != X86_XCPT_DF)
5677 {
5678 uErr |= X86_TRAP_ERR_EXTERNAL;
5679 }
5680 }
5681
5682 pVCpu->iem.s.cXcptRecursions++;
5683 pVCpu->iem.s.uCurXcpt = u8Vector;
5684 pVCpu->iem.s.fCurXcpt = fFlags;
5685 pVCpu->iem.s.uCurXcptErr = uErr;
5686 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5687
5688 /*
5689 * Extensive logging.
5690 */
5691#if defined(LOG_ENABLED) && defined(IN_RING3)
5692 if (LogIs3Enabled())
5693 {
5694 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5695 PVM pVM = pVCpu->CTX_SUFF(pVM);
5696 char szRegs[4096];
5697 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5698 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5699 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5700 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5701 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5702 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5703 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5704 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5705 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5706 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5707 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5708 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5709 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5710 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5711 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5712 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5713 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5714 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5715 " efer=%016VR{efer}\n"
5716 " pat=%016VR{pat}\n"
5717 " sf_mask=%016VR{sf_mask}\n"
5718 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5719 " lstar=%016VR{lstar}\n"
5720 " star=%016VR{star} cstar=%016VR{cstar}\n"
5721 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5722 );
5723
5724 char szInstr[256];
5725 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5726 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5727 szInstr, sizeof(szInstr), NULL);
5728 Log3(("%s%s\n", szRegs, szInstr));
5729 }
5730#endif /* LOG_ENABLED */
5731
5732 /*
5733 * Call the mode specific worker function.
5734 */
5735 VBOXSTRICTRC rcStrict;
5736 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5737 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5738 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5739 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5740 else
5741 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5742
5743 /* Flush the prefetch buffer. */
5744#ifdef IEM_WITH_CODE_TLB
5745 pVCpu->iem.s.pbInstrBuf = NULL;
5746#else
5747 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5748#endif
5749
5750 /*
5751 * Unwind.
5752 */
5753 pVCpu->iem.s.cXcptRecursions--;
5754 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5755 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5756 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5757 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5758 pVCpu->iem.s.cXcptRecursions + 1));
5759 return rcStrict;
5760}
5761
5762#ifdef IEM_WITH_SETJMP
5763/**
5764 * See iemRaiseXcptOrInt. Will not return.
5765 */
5766IEM_STATIC DECL_NO_RETURN(void)
5767iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5768 uint8_t cbInstr,
5769 uint8_t u8Vector,
5770 uint32_t fFlags,
5771 uint16_t uErr,
5772 uint64_t uCr2)
5773{
5774 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5775 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5776}
5777#endif
5778
5779
5780/** \#DE - 00. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5782{
5783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5784}
5785
5786
5787/** \#DB - 01.
5788 * @note This automatically clears DR7.GD. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5790{
5791 /** @todo set/clear RF. */
5792 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5793 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5794}
5795
5796
5797/** \#BR - 05. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5799{
5800 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5801}
5802
5803
5804/** \#UD - 06. */
5805DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5806{
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5808}
5809
5810
5811/** \#NM - 07. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5815}
5816
5817
5818/** \#TS(err) - 0a. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5820{
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5822}
5823
5824
5825/** \#TS(tr) - 0a. */
5826DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5827{
5828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5829 pVCpu->cpum.GstCtx.tr.Sel, 0);
5830}
5831
5832
5833/** \#TS(0) - 0a. */
5834DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5835{
5836 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5837 0, 0);
5838}
5839
5840
5841/** \#TS(sel) - 0a. */
5842DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5843{
5844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5845 uSel & X86_SEL_MASK_OFF_RPL, 0);
5846}
5847
5848
5849/** \#NP(err) - 0b. */
5850DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5851{
5852 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5853}
5854
5855
5856/** \#NP(sel) - 0b. */
5857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5858{
5859 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5860 uSel & ~X86_SEL_RPL, 0);
5861}
5862
5863
5864/** \#SS(seg) - 0c. */
5865DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5866{
5867 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5868 uSel & ~X86_SEL_RPL, 0);
5869}
5870
5871
5872/** \#SS(err) - 0c. */
5873DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5874{
5875 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5876}
5877
5878
5879/** \#GP(n) - 0d. */
5880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5881{
5882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5883}
5884
5885
5886/** \#GP(0) - 0d. */
5887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5888{
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5890}
5891
5892#ifdef IEM_WITH_SETJMP
5893/** \#GP(0) - 0d. */
5894DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5895{
5896 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5897}
5898#endif
5899
5900
5901/** \#GP(sel) - 0d. */
5902DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5903{
5904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5905 Sel & ~X86_SEL_RPL, 0);
5906}
5907
5908
5909/** \#GP(0) - 0d. */
5910DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5911{
5912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5913}
5914
5915
5916/** \#GP(sel) - 0d. */
5917DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5918{
5919 NOREF(iSegReg); NOREF(fAccess);
5920 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5921 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5922}
5923
5924#ifdef IEM_WITH_SETJMP
5925/** \#GP(sel) - 0d, longjmp. */
5926DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5927{
5928 NOREF(iSegReg); NOREF(fAccess);
5929 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5930 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5931}
5932#endif
5933
5934/** \#GP(sel) - 0d. */
5935DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5936{
5937 NOREF(Sel);
5938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5939}
5940
5941#ifdef IEM_WITH_SETJMP
5942/** \#GP(sel) - 0d, longjmp. */
5943DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5944{
5945 NOREF(Sel);
5946 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5947}
5948#endif
5949
5950
5951/** \#GP(sel) - 0d. */
5952DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5953{
5954 NOREF(iSegReg); NOREF(fAccess);
5955 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5956}
5957
5958#ifdef IEM_WITH_SETJMP
5959/** \#GP(sel) - 0d, longjmp. */
5960DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5961 uint32_t fAccess)
5962{
5963 NOREF(iSegReg); NOREF(fAccess);
5964 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5965}
5966#endif
5967
5968
5969/** \#PF(n) - 0e. */
5970DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5971{
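    /* Translate the page walk status into the #PF error code bits (P, W/R, U/S,
       I/D); e.g. a user-mode write to a present page yields P | RW | US (0x7).
       CR2 is supplied via GCPtrWhere and the IEM_XCPT_FLAGS_CR2 flag below. */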
5972 uint16_t uErr;
5973 switch (rc)
5974 {
5975 case VERR_PAGE_NOT_PRESENT:
5976 case VERR_PAGE_TABLE_NOT_PRESENT:
5977 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5978 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5979 uErr = 0;
5980 break;
5981
5982 default:
5983 AssertMsgFailed(("%Rrc\n", rc));
5984 RT_FALL_THRU();
5985 case VERR_ACCESS_DENIED:
5986 uErr = X86_TRAP_PF_P;
5987 break;
5988
5989 /** @todo reserved */
5990 }
5991
5992 if (pVCpu->iem.s.uCpl == 3)
5993 uErr |= X86_TRAP_PF_US;
5994
5995 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5996 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5997 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5998 uErr |= X86_TRAP_PF_ID;
5999
6000#if 0 /* This is so much non-sense, really. Why was it done like that? */
6001 /* Note! RW access callers reporting a WRITE protection fault, will clear
6002 the READ flag before calling. So, read-modify-write accesses (RW)
6003 can safely be reported as READ faults. */
6004 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6005 uErr |= X86_TRAP_PF_RW;
6006#else
6007 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6008 {
6009 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6010 uErr |= X86_TRAP_PF_RW;
6011 }
6012#endif
6013
6014 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6015 uErr, GCPtrWhere);
6016}
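
/*
 * Quick reference for the \#PF error code assembled above:
 *   bit 0 (X86_TRAP_PF_P)  - protection violation on a present page (clear when
 *                            the page or a paging-structure entry was not present);
 *   bit 1 (X86_TRAP_PF_RW) - write access;
 *   bit 2 (X86_TRAP_PF_US) - access made while at CPL 3;
 *   bit 4 (X86_TRAP_PF_ID) - instruction fetch, only reported when PAE and
 *                            EFER.NXE are both enabled.
 * E.g. a ring-3 write to a not-present page yields uErr = RW | US = 0x06.
 */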
6017
6018#ifdef IEM_WITH_SETJMP
6019/** \#PF(n) - 0e, longjmp. */
6020IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6021{
6022 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6023}
6024#endif
6025
6026
6027/** \#MF(0) - 10. */
6028DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6029{
6030 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6031}
6032
6033
6034/** \#AC(0) - 11. */
6035DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6036{
6037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6038}
6039
6040
6041/**
6042 * Macro for calling iemCImplRaiseDivideError().
6043 *
6044 * This enables us to add/remove arguments and force different levels of
6045 * inlining as we wish.
6046 *
6047 * @return Strict VBox status code.
6048 */
6049#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6050IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6051{
6052 NOREF(cbInstr);
6053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6054}
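
/*
 * Usage sketch (illustrative): an opcode decoder that only needs to raise \#DE
 * ends with
 *     return IEMOP_RAISE_DIVIDE_ERROR();
 * deferring the actual exception raising to iemCImplRaiseDivideError above,
 * the same pattern the FNIEMOP_UD_STUB macros further down use for \#UD.
 */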
6055
6056
6057/**
6058 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6059 *
6060 * This enables us to add/remove arguments and force different levels of
6061 * inlining as we wish.
6062 *
6063 * @return Strict VBox status code.
6064 */
6065#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6066IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6067{
6068 NOREF(cbInstr);
6069 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6070}
6071
6072
6073/**
6074 * Macro for calling iemCImplRaiseInvalidOpcode().
6075 *
6076 * This enables us to add/remove arguments and force different levels of
6077 * inlining as we wish.
6078 *
6079 * @return Strict VBox status code.
6080 */
6081#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6082IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6083{
6084 NOREF(cbInstr);
6085 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6086}
6087
6088
6089/** @} */
6090
6091
6092/*
6093 *
6094 * Helper routines.
6095 * Helper routines.
6096 * Helper routines.
6097 *
6098 */
6099
6100/**
6101 * Recalculates the effective operand size.
6102 *
6103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6104 */
6105IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6106{
6107 switch (pVCpu->iem.s.enmCpuMode)
6108 {
6109 case IEMMODE_16BIT:
6110 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6111 break;
6112 case IEMMODE_32BIT:
6113 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6114 break;
6115 case IEMMODE_64BIT:
6116 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6117 {
6118 case 0:
6119 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6120 break;
6121 case IEM_OP_PRF_SIZE_OP:
6122 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6123 break;
6124 case IEM_OP_PRF_SIZE_REX_W:
6125 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6126 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6127 break;
6128 }
6129 break;
6130 default:
6131 AssertFailed();
6132 }
6133}
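
/*
 * Rough summary of the 64-bit case above: with no size prefixes the default
 * operand size applies (32-bit, or 64-bit for the few instructions that go
 * through iemRecalEffOpSize64Default); 0x66 alone selects 16-bit; REX.W
 * selects 64-bit and takes precedence over 0x66.
 */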
6134
6135
6136/**
6137 * Sets the default operand size to 64-bit and recalculates the effective
6138 * operand size.
6139 *
6140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6141 */
6142IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6143{
6144 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6145 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6146 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6147 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6148 else
6149 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6150}
6151
6152
6153/*
6154 *
6155 * Common opcode decoders.
6156 * Common opcode decoders.
6157 * Common opcode decoders.
6158 *
6159 */
6160//#include <iprt/mem.h>
6161
6162/**
6163 * Used to add extra details about a stub case.
6164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6165 */
6166IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6167{
6168#if defined(LOG_ENABLED) && defined(IN_RING3)
6169 PVM pVM = pVCpu->CTX_SUFF(pVM);
6170 char szRegs[4096];
6171 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6172 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6173 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6174 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6175 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6176 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6177 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6178 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6179 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6180 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6181 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6182 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6183 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6184 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6185 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6186 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6187 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6188 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6189 " efer=%016VR{efer}\n"
6190 " pat=%016VR{pat}\n"
6191 " sf_mask=%016VR{sf_mask}\n"
6192 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6193 " lstar=%016VR{lstar}\n"
6194 " star=%016VR{star} cstar=%016VR{cstar}\n"
6195 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6196 );
6197
6198 char szInstr[256];
6199 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6200 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6201 szInstr, sizeof(szInstr), NULL);
6202
6203 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6204#else
6205 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6206#endif
6207}
6208
6209/**
6210 * Complains about a stub.
6211 *
6212 * Providing two versions of this macro, one for daily use and one for use when
6213 * working on IEM.
6214 */
6215#if 0
6216# define IEMOP_BITCH_ABOUT_STUB() \
6217 do { \
6218 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6219 iemOpStubMsg2(pVCpu); \
6220 RTAssertPanic(); \
6221 } while (0)
6222#else
6223# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6224#endif
6225
6226/** Stubs an opcode. */
6227#define FNIEMOP_STUB(a_Name) \
6228 FNIEMOP_DEF(a_Name) \
6229 { \
6230 RT_NOREF_PV(pVCpu); \
6231 IEMOP_BITCH_ABOUT_STUB(); \
6232 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6233 } \
6234 typedef int ignore_semicolon
6235
6236/** Stubs an opcode. */
6237#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6238 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6239 { \
6240 RT_NOREF_PV(pVCpu); \
6241 RT_NOREF_PV(a_Name0); \
6242 IEMOP_BITCH_ABOUT_STUB(); \
6243 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6244 } \
6245 typedef int ignore_semicolon
6246
6247/** Stubs an opcode which currently should raise \#UD. */
6248#define FNIEMOP_UD_STUB(a_Name) \
6249 FNIEMOP_DEF(a_Name) \
6250 { \
6251 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6252 return IEMOP_RAISE_INVALID_OPCODE(); \
6253 } \
6254 typedef int ignore_semicolon
6255
6256/** Stubs an opcode which currently should raise \#UD. */
6257#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6258 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6259 { \
6260 RT_NOREF_PV(pVCpu); \
6261 RT_NOREF_PV(a_Name0); \
6262 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6263 return IEMOP_RAISE_INVALID_OPCODE(); \
6264 } \
6265 typedef int ignore_semicolon
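
/*
 * Usage sketch (the opcode name is made up for illustration):
 *     FNIEMOP_STUB(iemOp_madeup_example);
 * The trailing "typedef int ignore_semicolon" lets the invocation end with a
 * semicolon without leaving a stray empty declaration at file scope.
 */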
6266
6267
6268
6269/** @name Register Access.
6270 * @{
6271 */
6272
6273/**
6274 * Gets a reference (pointer) to the specified hidden segment register.
6275 *
6276 * @returns Hidden register reference.
6277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6278 * @param iSegReg The segment register.
6279 */
6280IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6281{
6282 Assert(iSegReg < X86_SREG_COUNT);
6283 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6284 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6285
6286#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6287 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6288 { /* likely */ }
6289 else
6290 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6291#else
6292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6293#endif
6294 return pSReg;
6295}
6296
6297
6298/**
6299 * Ensures that the given hidden segment register is up to date.
6300 *
6301 * @returns Hidden register reference.
6302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6303 * @param pSReg The segment register.
6304 */
6305IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6306{
6307#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6308 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6309 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6310#else
6311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6312 NOREF(pVCpu);
6313#endif
6314 return pSReg;
6315}
6316
6317
6318/**
6319 * Gets a reference (pointer) to the specified segment register (the selector
6320 * value).
6321 *
6322 * @returns Pointer to the selector variable.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param iSegReg The segment register.
6325 */
6326DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6327{
6328 Assert(iSegReg < X86_SREG_COUNT);
6329 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6330 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6331}
6332
6333
6334/**
6335 * Fetches the selector value of a segment register.
6336 *
6337 * @returns The selector value.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 * @param iSegReg The segment register.
6340 */
6341DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6342{
6343 Assert(iSegReg < X86_SREG_COUNT);
6344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6345 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6346}
6347
6348
6349/**
6350 * Fetches the base address value of a segment register.
6351 *
6352 * @returns The segment base address value.
6353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6354 * @param iSegReg The segment register.
6355 */
6356DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6357{
6358 Assert(iSegReg < X86_SREG_COUNT);
6359 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6360 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6361}
6362
6363
6364/**
6365 * Gets a reference (pointer) to the specified general purpose register.
6366 *
6367 * @returns Register reference.
6368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6369 * @param iReg The general purpose register.
6370 */
6371DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6372{
6373 Assert(iReg < 16);
6374 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6375}
6376
6377
6378/**
6379 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6380 *
6381 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6382 *
6383 * @returns Register reference.
6384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6385 * @param iReg The register.
6386 */
6387DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6388{
6389 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6390 {
6391 Assert(iReg < 16);
6392 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6393 }
6394 /* high 8-bit register. */
6395 Assert(iReg < 8);
6396 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6397}
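
/*
 * Example of the mapping above: without a REX prefix, 8-bit register encodings
 * 4..7 select AH/CH/DH/BH, i.e. the high byte (bHi) of RAX/RCX/RDX/RBX - hence
 * the "iReg & 3".  With any REX prefix present the same encodings select
 * SPL/BPL/SIL/DIL and the first branch is taken instead.
 */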
6398
6399
6400/**
6401 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6402 *
6403 * @returns Register reference.
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param iReg The register.
6406 */
6407DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6408{
6409 Assert(iReg < 16);
6410 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6411}
6412
6413
6414/**
6415 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6416 *
6417 * @returns Register reference.
6418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6419 * @param iReg The register.
6420 */
6421DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6422{
6423 Assert(iReg < 16);
6424 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6425}
6426
6427
6428/**
6429 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6430 *
6431 * @returns Register reference.
6432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6433 * @param iReg The register.
6434 */
6435DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6436{
6437 Assert(iReg < 16);
6438 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6439}
6440
6441
6442/**
6443 * Gets a reference (pointer) to the specified segment register's base address.
6444 *
6445 * @returns Segment register base address reference.
6446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6447 * @param iSegReg The segment selector.
6448 */
6449DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6450{
6451 Assert(iSegReg < X86_SREG_COUNT);
6452 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6453 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6454}
6455
6456
6457/**
6458 * Fetches the value of an 8-bit general purpose register.
6459 *
6460 * @returns The register value.
6461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6462 * @param iReg The register.
6463 */
6464DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6465{
6466 return *iemGRegRefU8(pVCpu, iReg);
6467}
6468
6469
6470/**
6471 * Fetches the value of a 16-bit general purpose register.
6472 *
6473 * @returns The register value.
6474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6475 * @param iReg The register.
6476 */
6477DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6478{
6479 Assert(iReg < 16);
6480 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6481}
6482
6483
6484/**
6485 * Fetches the value of a 32-bit general purpose register.
6486 *
6487 * @returns The register value.
6488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6489 * @param iReg The register.
6490 */
6491DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6492{
6493 Assert(iReg < 16);
6494 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6495}
6496
6497
6498/**
6499 * Fetches the value of a 64-bit general purpose register.
6500 *
6501 * @returns The register value.
6502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6503 * @param iReg The register.
6504 */
6505DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6506{
6507 Assert(iReg < 16);
6508 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6509}
6510
6511
6512/**
6513 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6514 *
6515 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6516 * segment limit.
6517 *
6518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6519 * @param offNextInstr The offset of the next instruction.
6520 */
6521IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6522{
6523 switch (pVCpu->iem.s.enmEffOpSize)
6524 {
6525 case IEMMODE_16BIT:
6526 {
6527 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6528 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6529 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pVCpu->cpum.GstCtx.rip = uNewIp;
6532 break;
6533 }
6534
6535 case IEMMODE_32BIT:
6536 {
6537 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6538 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6539
6540 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6541 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6542 return iemRaiseGeneralProtectionFault0(pVCpu);
6543 pVCpu->cpum.GstCtx.rip = uNewEip;
6544 break;
6545 }
6546
6547 case IEMMODE_64BIT:
6548 {
6549 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6550
6551 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6552 if (!IEM_IS_CANONICAL(uNewRip))
6553 return iemRaiseGeneralProtectionFault0(pVCpu);
6554 pVCpu->cpum.GstCtx.rip = uNewRip;
6555 break;
6556 }
6557
6558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6559 }
6560
6561 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6562
6563#ifndef IEM_WITH_CODE_TLB
6564 /* Flush the prefetch buffer. */
6565 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6566#endif
6567
6568 return VINF_SUCCESS;
6569}
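
/*
 * Worked 16-bit example: with IP=0x1234 and a two byte "jmp $" (rel8 = -2),
 * the new IP is 0x1234 + (-2) + 2 = 0x1234, i.e. the instruction loops on
 * itself; the only check applied is the CS limit (and canonicalness in
 * 64-bit mode).
 */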
6570
6571
6572/**
6573 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6574 *
6575 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6576 * segment limit.
6577 *
6578 * @returns Strict VBox status code.
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6580 * @param offNextInstr The offset of the next instruction.
6581 */
6582IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6583{
6584 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6585
6586 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6587 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6588 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6589 return iemRaiseGeneralProtectionFault0(pVCpu);
6590 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6591 pVCpu->cpum.GstCtx.rip = uNewIp;
6592 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6593
6594#ifndef IEM_WITH_CODE_TLB
6595 /* Flush the prefetch buffer. */
6596 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6597#endif
6598
6599 return VINF_SUCCESS;
6600}
6601
6602
6603/**
6604 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6605 *
6606 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6607 * segment limit.
6608 *
6609 * @returns Strict VBox status code.
6610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6611 * @param offNextInstr The offset of the next instruction.
6612 */
6613IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6614{
6615 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6616
6617 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6618 {
6619 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6620
6621 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6622 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6623 return iemRaiseGeneralProtectionFault0(pVCpu);
6624 pVCpu->cpum.GstCtx.rip = uNewEip;
6625 }
6626 else
6627 {
6628 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6629
6630 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6631 if (!IEM_IS_CANONICAL(uNewRip))
6632 return iemRaiseGeneralProtectionFault0(pVCpu);
6633 pVCpu->cpum.GstCtx.rip = uNewRip;
6634 }
6635 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6636
6637#ifndef IEM_WITH_CODE_TLB
6638 /* Flush the prefetch buffer. */
6639 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6640#endif
6641
6642 return VINF_SUCCESS;
6643}
6644
6645
6646/**
6647 * Performs a near jump to the specified address.
6648 *
6649 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6650 * segment limit.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 * @param uNewRip The new RIP value.
6654 */
6655IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6656{
6657 switch (pVCpu->iem.s.enmEffOpSize)
6658 {
6659 case IEMMODE_16BIT:
6660 {
6661 Assert(uNewRip <= UINT16_MAX);
6662 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6663 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6664 return iemRaiseGeneralProtectionFault0(pVCpu);
6665 /** @todo Test 16-bit jump in 64-bit mode. */
6666 pVCpu->cpum.GstCtx.rip = uNewRip;
6667 break;
6668 }
6669
6670 case IEMMODE_32BIT:
6671 {
6672 Assert(uNewRip <= UINT32_MAX);
6673 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6674 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6675
6676 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6677 return iemRaiseGeneralProtectionFault0(pVCpu);
6678 pVCpu->cpum.GstCtx.rip = uNewRip;
6679 break;
6680 }
6681
6682 case IEMMODE_64BIT:
6683 {
6684 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6685
6686 if (!IEM_IS_CANONICAL(uNewRip))
6687 return iemRaiseGeneralProtectionFault0(pVCpu);
6688 pVCpu->cpum.GstCtx.rip = uNewRip;
6689 break;
6690 }
6691
6692 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6693 }
6694
6695 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6696
6697#ifndef IEM_WITH_CODE_TLB
6698 /* Flush the prefetch buffer. */
6699 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6700#endif
6701
6702 return VINF_SUCCESS;
6703}
6704
6705
6706/**
6707 * Gets the address of the top of the stack.
6708 *
6709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6710 */
6711DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6712{
6713 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6714 return pVCpu->cpum.GstCtx.rsp;
6715 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6716 return pVCpu->cpum.GstCtx.esp;
6717 return pVCpu->cpum.GstCtx.sp;
6718}
6719
6720
6721/**
6722 * Updates the RIP/EIP/IP to point to the next instruction.
6723 *
6724 * This function leaves the EFLAGS.RF flag alone.
6725 *
6726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6727 * @param cbInstr The number of bytes to add.
6728 */
6729IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6730{
6731 switch (pVCpu->iem.s.enmCpuMode)
6732 {
6733 case IEMMODE_16BIT:
6734 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6735 pVCpu->cpum.GstCtx.eip += cbInstr;
6736 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6737 break;
6738
6739 case IEMMODE_32BIT:
6740 pVCpu->cpum.GstCtx.eip += cbInstr;
6741 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6742 break;
6743
6744 case IEMMODE_64BIT:
6745 pVCpu->cpum.GstCtx.rip += cbInstr;
6746 break;
6747 default: AssertFailed();
6748 }
6749}
6750
6751
6752#if 0
6753/**
6754 * Updates the RIP/EIP/IP to point to the next instruction.
6755 *
6756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6757 */
6758IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6759{
6760 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6761}
6762#endif
6763
6764
6765
6766/**
6767 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6768 *
6769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6770 * @param cbInstr The number of bytes to add.
6771 */
6772IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6773{
6774 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6775
6776 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6777#if ARCH_BITS >= 64
6778 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6779 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6780 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6781#else
6782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6783 pVCpu->cpum.GstCtx.rip += cbInstr;
6784 else
6785 pVCpu->cpum.GstCtx.eip += cbInstr;
6786#endif
6787}
6788
6789
6790/**
6791 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6792 *
6793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6794 */
6795IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6796{
6797 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6798}
6799
6800
6801/**
6802 * Adds to the stack pointer.
6803 *
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param cbToAdd The number of bytes to add (8-bit!).
6806 */
6807DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6808{
6809 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6810 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6811 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6812 pVCpu->cpum.GstCtx.esp += cbToAdd;
6813 else
6814 pVCpu->cpum.GstCtx.sp += cbToAdd;
6815}
6816
6817
6818/**
6819 * Subtracts from the stack pointer.
6820 *
6821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6822 * @param cbToSub The number of bytes to subtract (8-bit!).
6823 */
6824DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6825{
6826 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6827 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6828 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6829 pVCpu->cpum.GstCtx.esp -= cbToSub;
6830 else
6831 pVCpu->cpum.GstCtx.sp -= cbToSub;
6832}
6833
6834
6835/**
6836 * Adds to the temporary stack pointer.
6837 *
6838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6839 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6840 * @param cbToAdd The number of bytes to add (16-bit).
6841 */
6842DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6843{
6844 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6845 pTmpRsp->u += cbToAdd;
6846 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6847 pTmpRsp->DWords.dw0 += cbToAdd;
6848 else
6849 pTmpRsp->Words.w0 += cbToAdd;
6850}
6851
6852
6853/**
6854 * Subtracts from the temporary stack pointer.
6855 *
6856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6857 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6858 * @param cbToSub The number of bytes to subtract.
6859 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6860 * expecting that.
6861 */
6862DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6863{
6864 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6865 pTmpRsp->u -= cbToSub;
6866 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6867 pTmpRsp->DWords.dw0 -= cbToSub;
6868 else
6869 pTmpRsp->Words.w0 -= cbToSub;
6870}
6871
6872
6873/**
6874 * Calculates the effective stack address for a push of the specified size as
6875 * well as the new RSP value (upper bits may be masked).
6876 *
6877 * @returns Effective stack address for the push.
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 * @param cbItem The size of the stack item to push.
6880 * @param puNewRsp Where to return the new RSP value.
6881 */
6882DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6883{
6884 RTUINT64U uTmpRsp;
6885 RTGCPTR GCPtrTop;
6886 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6887
6888 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6889 GCPtrTop = uTmpRsp.u -= cbItem;
6890 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6891 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6892 else
6893 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6894 *puNewRsp = uTmpRsp.u;
6895 return GCPtrTop;
6896}
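
/*
 * Example (32-bit stack, SS.D set): with ESP=0x1000 and cbItem=4 this returns
 * GCPtrTop=0xffc and a new RSP whose low dword is 0xffc; only the part of RSP
 * addressed by the current stack size (word, dword or quadword) is updated,
 * the remaining bits are carried over unchanged.
 */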
6897
6898
6899/**
6900 * Gets the current stack pointer and calculates the value after a pop of the
6901 * specified size.
6902 *
6903 * @returns Current stack pointer.
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 * @param cbItem The size of the stack item to pop.
6906 * @param puNewRsp Where to return the new RSP value.
6907 */
6908DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6909{
6910 RTUINT64U uTmpRsp;
6911 RTGCPTR GCPtrTop;
6912 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6913
6914 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6915 {
6916 GCPtrTop = uTmpRsp.u;
6917 uTmpRsp.u += cbItem;
6918 }
6919 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6920 {
6921 GCPtrTop = uTmpRsp.DWords.dw0;
6922 uTmpRsp.DWords.dw0 += cbItem;
6923 }
6924 else
6925 {
6926 GCPtrTop = uTmpRsp.Words.w0;
6927 uTmpRsp.Words.w0 += cbItem;
6928 }
6929 *puNewRsp = uTmpRsp.u;
6930 return GCPtrTop;
6931}
6932
6933
6934/**
6935 * Calculates the effective stack address for a push of the specified size as
6936 * well as the new temporary RSP value (upper bits may be masked).
6937 *
6938 * @returns Effective stack address for the push.
6939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6940 * @param pTmpRsp The temporary stack pointer. This is updated.
6941 * @param cbItem The size of the stack item to push.
6942 */
6943DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6944{
6945 RTGCPTR GCPtrTop;
6946
6947 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6948 GCPtrTop = pTmpRsp->u -= cbItem;
6949 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6950 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6951 else
6952 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6953 return GCPtrTop;
6954}
6955
6956
6957/**
6958 * Gets the effective stack address for a pop of the specified size and
6959 * calculates and updates the temporary RSP.
6960 *
6961 * @returns Current stack pointer.
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 * @param pTmpRsp The temporary stack pointer. This is updated.
6964 * @param cbItem The size of the stack item to pop.
6965 */
6966DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6967{
6968 RTGCPTR GCPtrTop;
6969 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6970 {
6971 GCPtrTop = pTmpRsp->u;
6972 pTmpRsp->u += cbItem;
6973 }
6974 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6975 {
6976 GCPtrTop = pTmpRsp->DWords.dw0;
6977 pTmpRsp->DWords.dw0 += cbItem;
6978 }
6979 else
6980 {
6981 GCPtrTop = pTmpRsp->Words.w0;
6982 pTmpRsp->Words.w0 += cbItem;
6983 }
6984 return GCPtrTop;
6985}
6986
6987/** @} */
6988
6989
6990/** @name FPU access and helpers.
6991 *
6992 * @{
6993 */
6994
6995
6996/**
6997 * Hook for preparing to use the host FPU.
6998 *
6999 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7000 *
7001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7002 */
7003DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7004{
7005#ifdef IN_RING3
7006 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7007#else
7008 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7009#endif
7010 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7011}
7012
7013
7014/**
7015 * Hook for preparing to use the host FPU for SSE.
7016 *
7017 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 */
7021DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7022{
7023 iemFpuPrepareUsage(pVCpu);
7024}
7025
7026
7027/**
7028 * Hook for preparing to use the host FPU for AVX.
7029 *
7030 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7031 *
7032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7033 */
7034DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7035{
7036 iemFpuPrepareUsage(pVCpu);
7037}
7038
7039
7040/**
7041 * Hook for actualizing the guest FPU state before the interpreter reads it.
7042 *
7043 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7044 *
7045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7046 */
7047DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7048{
7049#ifdef IN_RING3
7050 NOREF(pVCpu);
7051#else
7052 CPUMRZFpuStateActualizeForRead(pVCpu);
7053#endif
7054 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7055}
7056
7057
7058/**
7059 * Hook for actualizing the guest FPU state before the interpreter changes it.
7060 *
7061 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7062 *
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 */
7065DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7066{
7067#ifdef IN_RING3
7068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7069#else
7070 CPUMRZFpuStateActualizeForChange(pVCpu);
7071#endif
7072 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7073}
7074
7075
7076/**
7077 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7078 * only.
7079 *
7080 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7081 *
7082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7083 */
7084DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7085{
7086#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7087 NOREF(pVCpu);
7088#else
7089 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7090#endif
7091 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7092}
7093
7094
7095/**
7096 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7097 * read+write.
7098 *
7099 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7100 *
7101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7102 */
7103DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7104{
7105#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7106 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7107#else
7108 CPUMRZFpuStateActualizeForChange(pVCpu);
7109#endif
7110 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7111}
7112
7113
7114/**
7115 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7116 * only.
7117 *
7118 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7119 *
7120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7121 */
7122DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7123{
7124#ifdef IN_RING3
7125 NOREF(pVCpu);
7126#else
7127 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7128#endif
7129 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7130}
7131
7132
7133/**
7134 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7135 * read+write.
7136 *
7137 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7138 *
7139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7140 */
7141DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7142{
7143#ifdef IN_RING3
7144 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7145#else
7146 CPUMRZFpuStateActualizeForChange(pVCpu);
7147#endif
7148 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7149}
7150
7151
7152/**
7153 * Stores a QNaN value into a FPU register.
7154 *
7155 * @param pReg Pointer to the register.
7156 */
7157DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7158{
7159 pReg->au32[0] = UINT32_C(0x00000000);
7160 pReg->au32[1] = UINT32_C(0xc0000000);
7161 pReg->au16[4] = UINT16_C(0xffff);
7162}
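
/*
 * The three stores above assemble the 80-bit value 0xFFFF:C0000000:00000000
 * (sign set, exponent all ones, integer and quiet bits set), i.e. the x87
 * "real indefinite" QNaN that masked invalid-operation responses produce.
 */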
7163
7164
7165/**
7166 * Updates the FOP, FPU.CS and FPUIP registers.
7167 *
7168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7169 * @param pFpuCtx The FPU context.
7170 */
7171DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7172{
7173 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7174 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7175 /** @todo x87.CS and FPUIP need to be kept separately. */
7176 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7177 {
7178 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7179 * happens in real mode here based on the fnsave and fnstenv images. */
7180 pFpuCtx->CS = 0;
7181 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7182 }
7183 else
7184 {
7185 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7186 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7187 }
7188}
7189
7190
7191/**
7192 * Updates the x87.DS and FPUDP registers.
7193 *
7194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7195 * @param pFpuCtx The FPU context.
7196 * @param iEffSeg The effective segment register.
7197 * @param GCPtrEff The effective address relative to @a iEffSeg.
7198 */
7199DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7200{
7201 RTSEL sel;
7202 switch (iEffSeg)
7203 {
7204 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7205 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7206 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7207 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7208 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7209 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7210 default:
7211 AssertMsgFailed(("%d\n", iEffSeg));
7212 sel = pVCpu->cpum.GstCtx.ds.Sel;
7213 }
7214 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7215 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7216 {
7217 pFpuCtx->DS = 0;
7218 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7219 }
7220 else
7221 {
7222 pFpuCtx->DS = sel;
7223 pFpuCtx->FPUDP = GCPtrEff;
7224 }
7225}
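
/*
 * Example of the real/V86 encoding above: with DS=0x1000 and an effective
 * offset of 0x0020, FPUDP is stored as (0x1000 << 4) + 0x20 = 0x10020, the
 * linear-style format the fnsave/fnstenv images use outside protected mode.
 */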
7226
7227
7228/**
7229 * Rotates the stack registers in the push direction.
7230 *
7231 * @param pFpuCtx The FPU context.
7232 * @remarks This is a complete waste of time, but fxsave stores the registers in
7233 * stack order.
7234 */
7235DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7236{
7237 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7238 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7239 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7240 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7241 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7242 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7243 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7244 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7245 pFpuCtx->aRegs[0].r80 = r80Tmp;
7246}
7247
7248
7249/**
7250 * Rotates the stack registers in the pop direction.
7251 *
7252 * @param pFpuCtx The FPU context.
7253 * @remarks This is a complete waste of time, but fxsave stores the registers in
7254 * stack order.
7255 */
7256DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7257{
7258 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7259 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7260 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7261 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7262 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7263 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7264 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7265 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7266 pFpuCtx->aRegs[7].r80 = r80Tmp;
7267}
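
/*
 * Background for the two rotate helpers: aRegs[] is kept in ST(i) order, the
 * same layout fxsave/fxrstor use, rather than in physical R0..R7 order.  A
 * TOP change on push/pop therefore has to physically rotate the array so that
 * aRegs[0] keeps denoting ST(0); that is the "waste of time" the remarks
 * above grumble about.
 */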
7268
7269
7270/**
7271 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7272 * exception prevents it.
7273 *
7274 * @param pResult The FPU operation result to push.
7275 * @param pFpuCtx The FPU context.
7276 */
7277IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7278{
7279 /* Update FSW and bail if there are pending exceptions afterwards. */
7280 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7281 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7282 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7283 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7284 {
7285 pFpuCtx->FSW = fFsw;
7286 return;
7287 }
7288
7289 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7290 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7291 {
7292 /* All is fine, push the actual value. */
7293 pFpuCtx->FTW |= RT_BIT(iNewTop);
7294 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7295 }
7296 else if (pFpuCtx->FCW & X86_FCW_IM)
7297 {
7298 /* Masked stack overflow, push QNaN. */
7299 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7300 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7301 }
7302 else
7303 {
7304 /* Raise stack overflow, don't push anything. */
7305 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7306 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7307 return;
7308 }
7309
7310 fFsw &= ~X86_FSW_TOP_MASK;
7311 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7312 pFpuCtx->FSW = fFsw;
7313
7314 iemFpuRotateStackPush(pFpuCtx);
7315}
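
/*
 * Notes on the helper above: (TOP + 7) & X86_FSW_TOP_SMASK is TOP-1 modulo 8
 * (TOP is a 3-bit field), e.g. TOP=0 becomes 7.  A masked stack overflow
 * (target slot not empty, FCW.IM set) pushes the indefinite QNaN and reports
 * IE+SF with C1=1; unmasked, IE+SF+C1+ES+B are set and TOP and the registers
 * are left untouched.
 */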
7316
7317
7318/**
7319 * Stores a result in a FPU register and updates the FSW and FTW.
7320 *
7321 * @param pFpuCtx The FPU context.
7322 * @param pResult The result to store.
7323 * @param iStReg Which FPU register to store it in.
7324 */
7325IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7326{
7327 Assert(iStReg < 8);
7328 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7329 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7330 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7331 pFpuCtx->FTW |= RT_BIT(iReg);
7332 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7333}
7334
7335
7336/**
7337 * Only updates the FPU status word (FSW) with the result of the current
7338 * instruction.
7339 *
7340 * @param pFpuCtx The FPU context.
7341 * @param u16FSW The FSW output of the current instruction.
7342 */
7343IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7344{
7345 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7346 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7347}
7348
7349
7350/**
7351 * Pops one item off the FPU stack if no pending exception prevents it.
7352 *
7353 * @param pFpuCtx The FPU context.
7354 */
7355IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7356{
7357 /* Check pending exceptions. */
7358 uint16_t uFSW = pFpuCtx->FSW;
7359 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7360 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7361 return;
7362
7363 /* TOP--. */
7364 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7365 uFSW &= ~X86_FSW_TOP_MASK;
7366 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7367 pFpuCtx->FSW = uFSW;
7368
7369 /* Mark the previous ST0 as empty. */
7370 iOldTop >>= X86_FSW_TOP_SHIFT;
7371 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7372
7373 /* Rotate the registers. */
7374 iemFpuRotateStackPop(pFpuCtx);
7375}
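
/*
 * The "+ (9 << X86_FSW_TOP_SHIFT)" above looks odd, but since TOP is a 3-bit
 * field and the result is masked with X86_FSW_TOP_MASK it is simply TOP+1
 * modulo 8 (9 mod 8 == 1); e.g. TOP=7 wraps to 0 on the pop.
 */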
7376
7377
7378/**
7379 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7380 *
7381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7382 * @param pResult The FPU operation result to push.
7383 */
7384IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7385{
7386 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7387 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7388 iemFpuMaybePushResult(pResult, pFpuCtx);
7389}
7390
7391
7392/**
7393 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7394 * and sets FPUDP and FPUDS.
7395 *
7396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7397 * @param pResult The FPU operation result to push.
7398 * @param iEffSeg The effective segment register.
7399 * @param GCPtrEff The effective address relative to @a iEffSeg.
7400 */
7401IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7402{
7403 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7404 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7405 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7406 iemFpuMaybePushResult(pResult, pFpuCtx);
7407}
7408
7409
7410/**
7411 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7412 * unless a pending exception prevents it.
7413 *
7414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7415 * @param pResult The FPU operation result to store and push.
7416 */
7417IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7418{
7419 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7420 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7421
7422 /* Update FSW and bail if there are pending exceptions afterwards. */
7423 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7424 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7425 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7426 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7427 {
7428 pFpuCtx->FSW = fFsw;
7429 return;
7430 }
7431
7432 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7433 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7434 {
7435 /* All is fine, push the actual value. */
7436 pFpuCtx->FTW |= RT_BIT(iNewTop);
7437 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7438 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7439 }
7440 else if (pFpuCtx->FCW & X86_FCW_IM)
7441 {
7442 /* Masked stack overflow, push QNaN. */
7443 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7444 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7445 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7446 }
7447 else
7448 {
7449 /* Raise stack overflow, don't push anything. */
7450 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7451 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7452 return;
7453 }
7454
7455 fFsw &= ~X86_FSW_TOP_MASK;
7456 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7457 pFpuCtx->FSW = fFsw;
7458
7459 iemFpuRotateStackPush(pFpuCtx);
7460}
7461
7462
7463/**
7464 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7465 * FOP.
7466 *
7467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7468 * @param pResult The result to store.
7469 * @param iStReg Which FPU register to store it in.
7470 */
7471IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7472{
7473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7474 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7475 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7476}
7477
7478
7479/**
7480 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7481 * FOP, and then pops the stack.
7482 *
7483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7484 * @param pResult The result to store.
7485 * @param iStReg Which FPU register to store it in.
7486 */
7487IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7488{
7489 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7491 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7492 iemFpuMaybePopOne(pFpuCtx);
7493}
7494
7495
7496/**
7497 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7498 * FPUDP, and FPUDS.
7499 *
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param pResult The result to store.
7502 * @param iStReg Which FPU register to store it in.
7503 * @param iEffSeg The effective memory operand selector register.
7504 * @param GCPtrEff The effective memory operand offset.
7505 */
7506IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7507 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7508{
7509 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7510 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7511 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7512 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7513}
7514
7515
7516/**
7517 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7518 * FPUDP, and FPUDS, and then pops the stack.
7519 *
7520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7521 * @param pResult The result to store.
7522 * @param iStReg Which FPU register to store it in.
7523 * @param iEffSeg The effective memory operand selector register.
7524 * @param GCPtrEff The effective memory operand offset.
7525 */
7526IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7527 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7528{
7529 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7530 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7531 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7532 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7533 iemFpuMaybePopOne(pFpuCtx);
7534}
7535
7536
7537/**
7538 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7539 *
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 */
7542IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7543{
7544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7546}
7547
7548
7549/**
7550 * Marks the specified stack register as free (for FFREE).
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 * @param iStReg The register to free.
7554 */
7555IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7556{
7557 Assert(iStReg < 8);
7558 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7559 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7560 pFpuCtx->FTW &= ~RT_BIT(iReg);
7561}
7562
7563
7564/**
7565 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7566 *
7567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7568 */
7569IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7570{
7571 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7572 uint16_t uFsw = pFpuCtx->FSW;
7573 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7574 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7575 uFsw &= ~X86_FSW_TOP_MASK;
7576 uFsw |= uTop;
7577 pFpuCtx->FSW = uFsw;
7578}
7579
7580
7581/**
7582 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7583 *
7584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7585 */
7586IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7587{
7588 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7589 uint16_t uFsw = pFpuCtx->FSW;
7590 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7591 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7592 uFsw &= ~X86_FSW_TOP_MASK;
7593 uFsw |= uTop;
7594 pFpuCtx->FSW = uFsw;
7595}
7596
7597
7598/**
7599 * Updates the FSW, FOP, FPUIP, and FPUCS.
7600 *
7601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7602 * @param u16FSW The FSW from the current instruction.
7603 */
7604IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7605{
7606 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7607 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7608 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7609}
7610
7611
7612/**
7613 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7614 *
7615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7616 * @param u16FSW The FSW from the current instruction.
7617 */
7618IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7619{
7620 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7621 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7622 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7623 iemFpuMaybePopOne(pFpuCtx);
7624}
7625
7626
7627/**
7628 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7629 *
7630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7631 * @param u16FSW The FSW from the current instruction.
7632 * @param iEffSeg The effective memory operand selector register.
7633 * @param GCPtrEff The effective memory operand offset.
7634 */
7635IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7636{
7637 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7638 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7639 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7640 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7641}
7642
7643
7644/**
7645 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7646 *
7647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7648 * @param u16FSW The FSW from the current instruction.
7649 */
7650IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7651{
7652 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7653 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7654 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7655 iemFpuMaybePopOne(pFpuCtx);
7656 iemFpuMaybePopOne(pFpuCtx);
7657}
7658
7659
7660/**
7661 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7662 *
7663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7664 * @param u16FSW The FSW from the current instruction.
7665 * @param iEffSeg The effective memory operand selector register.
7666 * @param GCPtrEff The effective memory operand offset.
7667 */
7668IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7669{
7670 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7671 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7672 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7673 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7674 iemFpuMaybePopOne(pFpuCtx);
7675}
7676
7677
7678/**
7679 * Worker routine for raising an FPU stack underflow exception.
7680 *
7681 * @param pFpuCtx The FPU context.
7682 * @param iStReg The stack register being accessed.
7683 */
7684IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7685{
7686 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7687 if (pFpuCtx->FCW & X86_FCW_IM)
7688 {
7689 /* Masked underflow. */
7690 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7692 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7693 if (iStReg != UINT8_MAX)
7694 {
7695 pFpuCtx->FTW |= RT_BIT(iReg);
7696 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7697 }
7698 }
7699 else
7700 {
7701 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7702 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7703 }
7704}
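
/*
 * Summary of the two paths above: with FCW.IM set (masked \#IS) the affected
 * stack register, when one is given, is marked valid and loaded with the
 * indefinite QNaN while FSW gets IE+SF with C1 cleared (C1=0 indicates
 * underflow); unmasked, only IE+SF+ES+B are set and the register stack is
 * left untouched.
 */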
7705
7706
7707/**
7708 * Raises a FPU stack underflow exception.
7709 *
7710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7711 * @param iStReg The destination register that should be loaded
7712 * with QNaN if \#IS is not masked. Specify
7713 * UINT8_MAX if none (like for fcom).
7714 */
7715DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7716{
7717 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7718 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7719 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7720}
7721
7722
7723DECL_NO_INLINE(IEM_STATIC, void)
7724iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7725{
7726 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7727 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7728 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7729 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7730}
7731
7732
7733DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7734{
7735 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7736 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7737 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7738 iemFpuMaybePopOne(pFpuCtx);
7739}
7740
7741
7742DECL_NO_INLINE(IEM_STATIC, void)
7743iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7744{
7745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7746 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7747 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7748 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7749 iemFpuMaybePopOne(pFpuCtx);
7750}
7751
7752
7753DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7754{
7755 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7756 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7757 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7758 iemFpuMaybePopOne(pFpuCtx);
7759 iemFpuMaybePopOne(pFpuCtx);
7760}
7761
7762
7763DECL_NO_INLINE(IEM_STATIC, void)
7764iemFpuStackPushUnderflow(PVMCPU pVCpu)
7765{
7766 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7767 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7768
7769 if (pFpuCtx->FCW & X86_FCW_IM)
7770 {
7771 /* Masked underflow - Push QNaN. */
7772 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7773 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7774 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7775 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7776 pFpuCtx->FTW |= RT_BIT(iNewTop);
7777 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7778 iemFpuRotateStackPush(pFpuCtx);
7779 }
7780 else
7781 {
7782 /* Exception pending - don't change TOP or the register stack. */
7783 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7784 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7785 }
7786}
7787
7788
7789DECL_NO_INLINE(IEM_STATIC, void)
7790iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7791{
7792 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7793 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7794
7795 if (pFpuCtx->FCW & X86_FCW_IM)
7796 {
7797 /* Masked underflow - Push QNaN. */
7798 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7799 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7800 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7801 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7802 pFpuCtx->FTW |= RT_BIT(iNewTop);
7803 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7804 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7805 iemFpuRotateStackPush(pFpuCtx);
7806 }
7807 else
7808 {
7809 /* Exception pending - don't change TOP or the register stack. */
7810 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7811 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7812 }
7813}
7814
7815
7816/**
7817 * Worker routine for raising an FPU stack overflow exception on a push.
7818 *
7819 * @param pFpuCtx The FPU context.
7820 */
7821IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7822{
7823 if (pFpuCtx->FCW & X86_FCW_IM)
7824 {
7825 /* Masked overflow. */
7826 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7827 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7828 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7829 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7830 pFpuCtx->FTW |= RT_BIT(iNewTop);
7831 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7832 iemFpuRotateStackPush(pFpuCtx);
7833 }
7834 else
7835 {
7836 /* Exception pending - don't change TOP or the register stack. */
7837 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7838 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7839 }
7840}
7841
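/*
 * Note on the TOP arithmetic used by the push workers above: the x87 register
 * stack grows downwards, so a push decrements TOP modulo 8, which is what the
 * (TOP + 7) & 7 expression computes. The sketch below is illustrative only
 * (the Example* name is made up here and the block is not compiled into IEM).
 */
#if 0 /* illustrative sketch */
static uint16_t ExampleX87TopAfterPush(uint16_t uTopBefore)
{
    /* E.g. TOP=0 becomes 7 and TOP=3 becomes 2; same as (uTopBefore - 1) & 7. */
    return (uTopBefore + 7) & X86_FSW_TOP_SMASK;
}
#endif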
7842
7843/**
7844 * Raises an FPU stack overflow exception on a push.
7845 *
7846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7847 */
7848DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7849{
7850 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7851 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7852 iemFpuStackPushOverflowOnly(pFpuCtx);
7853}
7854
7855
7856/**
7857 * Raises a FPU stack overflow exception on a push with a memory operand.
7858 *
7859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7860 * @param iEffSeg The effective memory operand selector register.
7861 * @param GCPtrEff The effective memory operand offset.
7862 */
7863DECL_NO_INLINE(IEM_STATIC, void)
7864iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7865{
7866 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7867 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7868 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7869 iemFpuStackPushOverflowOnly(pFpuCtx);
7870}
7871
7872
7873IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7874{
7875 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7876 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7877 if (pFpuCtx->FTW & RT_BIT(iReg))
7878 return VINF_SUCCESS;
7879 return VERR_NOT_FOUND;
7880}
7881
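/*
 * The lookup helpers here translate a TOP-relative ST(i) index into a physical
 * register index with (TOP + i) & 7 before testing the FTW bit. A standalone
 * sketch of that mapping (illustrative only, not compiled into IEM; the
 * Example* name is made up):
 */
#if 0 /* illustrative sketch */
static unsigned ExampleStRegToPhysReg(uint16_t u16Fsw, uint8_t iStReg)
{
    /* With TOP=6, ST(0) is physical register 6 and ST(3) is physical register 1. */
    return (X86_FSW_TOP_GET(u16Fsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif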
7882
7883IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7884{
7885 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7886 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7887 if (pFpuCtx->FTW & RT_BIT(iReg))
7888 {
7889 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7890 return VINF_SUCCESS;
7891 }
7892 return VERR_NOT_FOUND;
7893}
7894
7895
7896IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7897 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7898{
7899 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7900 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7901 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7902 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7903 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7904 {
7905 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7906 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7907 return VINF_SUCCESS;
7908 }
7909 return VERR_NOT_FOUND;
7910}
7911
7912
7913IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7914{
7915 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7916 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7917 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7918 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7919 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7920 {
7921 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7922 return VINF_SUCCESS;
7923 }
7924 return VERR_NOT_FOUND;
7925}
7926
7927
7928/**
7929 * Updates the FPU exception status after FCW is changed.
7930 *
7931 * @param pFpuCtx The FPU context.
7932 */
7933IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7934{
7935 uint16_t u16Fsw = pFpuCtx->FSW;
7936 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7937 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7938 else
7939 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7940 pFpuCtx->FSW = u16Fsw;
7941}
7942
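/*
 * Put differently: ES (and the legacy B bit) must be set exactly when at least
 * one exception flag in FSW is not masked by the corresponding FCW mask bit.
 * A standalone sketch of that predicate (illustrative only, not compiled into
 * IEM; the Example* name is made up):
 */
#if 0 /* illustrative sketch */
static bool ExampleX87HasUnmaskedXcpt(uint16_t u16Fsw, uint16_t u16Fcw)
{
    /* True when an exception flag set in FSW is not masked in FCW. */
    return ((u16Fsw & X86_FSW_XCPT_MASK) & ~(u16Fcw & X86_FCW_XCPT_MASK)) != 0;
}
#endif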
7943
7944/**
7945 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7946 *
7947 * @returns The full FTW.
7948 * @param pFpuCtx The FPU context.
7949 */
7950IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7951{
7952 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7953 uint16_t u16Ftw = 0;
7954 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7955 for (unsigned iSt = 0; iSt < 8; iSt++)
7956 {
7957 unsigned const iReg = (iSt + iTop) & 7;
7958 if (!(u8Ftw & RT_BIT(iReg)))
7959 u16Ftw |= 3 << (iReg * 2); /* empty */
7960 else
7961 {
7962 uint16_t uTag;
7963 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7964 if (pr80Reg->s.uExponent == 0x7fff)
7965 uTag = 2; /* Exponent is all 1's => Special. */
7966 else if (pr80Reg->s.uExponent == 0x0000)
7967 {
7968 if (pr80Reg->s.u64Mantissa == 0x0000)
7969 uTag = 1; /* All bits are zero => Zero. */
7970 else
7971 uTag = 2; /* Must be special. */
7972 }
7973 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7974 uTag = 0; /* Valid. */
7975 else
7976 uTag = 2; /* Must be special. */
7977
7978 u16Ftw |= uTag << (iReg * 2);
7979 }
7980 }
7981
7982 return u16Ftw;
7983}
7984
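/*
 * The tag values computed above follow the x87 encoding: 0 = valid, 1 = zero,
 * 2 = special (NaN, infinity, denormal, unnormal) and 3 = empty. A standalone
 * sketch of the per-register classification (illustrative only, not compiled
 * into IEM; the Example* name is made up):
 */
#if 0 /* illustrative sketch */
static uint16_t ExampleClassifyX87Tag(uint16_t uExponent, uint64_t u64Mantissa)
{
    if (uExponent == 0x7fff)
        return 2; /* all-ones exponent: NaN or infinity -> special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 /* zero */ : 2 /* denormal -> special */;
    return (u64Mantissa & RT_BIT_64(63)) ? 0 /* valid (J bit set) */ : 2 /* unnormal -> special */;
}
#endif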
7985
7986/**
7987 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7988 *
7989 * @returns The compressed FTW.
7990 * @param u16FullFtw The full FTW to convert.
7991 */
7992IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7993{
7994 uint8_t u8Ftw = 0;
7995 for (unsigned i = 0; i < 8; i++)
7996 {
7997 if ((u16FullFtw & 3) != 3 /*empty*/)
7998 u8Ftw |= RT_BIT(i);
7999 u16FullFtw >>= 2;
8000 }
8001
8002 return u8Ftw;
8003}
8004
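/*
 * A quick worked example of the conversion above, illustrative only and not
 * compiled into IEM (the Example* name is made up): a full FTW of 0xfff2 tags
 * physical register 0 as special (10b), register 1 as valid (00b) and
 * registers 2 thru 7 as empty (11b), so only two bits survive compression.
 */
#if 0 /* illustrative sketch */
static void ExampleFtwCompression(void)
{
    uint16_t const u16Compressed = iemFpuCompressFtw(UINT16_C(0xfff2));
    Assert(u16Compressed == 0x03); /* registers 0 and 1 are in use */
    NOREF(u16Compressed);
}
#endif
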
8005/** @} */
8006
8007
8008/** @name Memory access.
8009 *
8010 * @{
8011 */
8012
8013
8014/**
8015 * Updates the IEMCPU::cbWritten counter if applicable.
8016 *
8017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8018 * @param fAccess The access being accounted for.
8019 * @param cbMem The access size.
8020 */
8021DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8022{
8023 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8024 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8025 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8026}
8027
8028
8029/**
8030 * Checks if the given segment can be written to, raising the appropriate
8031 * exception if not.
8032 *
8033 * @returns VBox strict status code.
8034 *
8035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8036 * @param pHid Pointer to the hidden register.
8037 * @param iSegReg The register number.
8038 * @param pu64BaseAddr Where to return the base address to use for the
8039 * segment. (In 64-bit code it may differ from the
8040 * base in the hidden segment.)
8041 */
8042IEM_STATIC VBOXSTRICTRC
8043iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8044{
8045 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8046
8047 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8048 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8049 else
8050 {
8051 if (!pHid->Attr.n.u1Present)
8052 {
8053 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8054 AssertRelease(uSel == 0);
8055 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8056 return iemRaiseGeneralProtectionFault0(pVCpu);
8057 }
8058
8059 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8060 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8061 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8062 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8063 *pu64BaseAddr = pHid->u64Base;
8064 }
8065 return VINF_SUCCESS;
8066}
8067
8068
8069/**
8070 * Checks if the given segment can be read from, raising the appropriate
8071 * exception if not.
8072 *
8073 * @returns VBox strict status code.
8074 *
8075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8076 * @param pHid Pointer to the hidden register.
8077 * @param iSegReg The register number.
8078 * @param pu64BaseAddr Where to return the base address to use for the
8079 * segment. (In 64-bit code it may differ from the
8080 * base in the hidden segment.)
8081 */
8082IEM_STATIC VBOXSTRICTRC
8083iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8084{
8085 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8086
8087 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8088 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8089 else
8090 {
8091 if (!pHid->Attr.n.u1Present)
8092 {
8093 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8094 AssertRelease(uSel == 0);
8095 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8096 return iemRaiseGeneralProtectionFault0(pVCpu);
8097 }
8098
8099 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8100 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8101 *pu64BaseAddr = pHid->u64Base;
8102 }
8103 return VINF_SUCCESS;
8104}
8105
8106
8107/**
8108 * Applies the segment limit, base and attributes.
8109 *
8110 * This may raise a \#GP or \#SS.
8111 *
8112 * @returns VBox strict status code.
8113 *
8114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8115 * @param fAccess The kind of access which is being performed.
8116 * @param iSegReg The index of the segment register to apply.
8117 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8118 * TSS, ++).
8119 * @param cbMem The access size.
8120 * @param pGCPtrMem Pointer to the guest memory address to apply
8121 * segmentation to. Input and output parameter.
8122 */
8123IEM_STATIC VBOXSTRICTRC
8124iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8125{
8126 if (iSegReg == UINT8_MAX)
8127 return VINF_SUCCESS;
8128
8129 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8130 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8131 switch (pVCpu->iem.s.enmCpuMode)
8132 {
8133 case IEMMODE_16BIT:
8134 case IEMMODE_32BIT:
8135 {
8136 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8137 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8138
8139 if ( pSel->Attr.n.u1Present
8140 && !pSel->Attr.n.u1Unusable)
8141 {
8142 Assert(pSel->Attr.n.u1DescType);
8143 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8144 {
8145 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8146 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8147 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8148
8149 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8150 {
8151 /** @todo CPL check. */
8152 }
8153
8154 /*
8155 * There are two kinds of data selectors, normal and expand down.
8156 */
8157 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8158 {
8159 if ( GCPtrFirst32 > pSel->u32Limit
8160 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8161 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8162 }
8163 else
8164 {
8165 /*
8166 * The upper boundary is defined by the B bit, not the G bit!
8167 */
8168 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8169 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8170 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8171 }
8172 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8173 }
8174 else
8175 {
8176
8177 /*
8178 * Code selectors can usually be used to read through; writing is
8179 * only permitted in real and V8086 mode.
8180 */
8181 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8182 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8183 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8184 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8185 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8186
8187 if ( GCPtrFirst32 > pSel->u32Limit
8188 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8189 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8190
8191 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8192 {
8193 /** @todo CPL check. */
8194 }
8195
8196 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8197 }
8198 }
8199 else
8200 return iemRaiseGeneralProtectionFault0(pVCpu);
8201 return VINF_SUCCESS;
8202 }
8203
8204 case IEMMODE_64BIT:
8205 {
8206 RTGCPTR GCPtrMem = *pGCPtrMem;
8207 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8208 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8209
8210 Assert(cbMem >= 1);
8211 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8212 return VINF_SUCCESS;
8213 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8214 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8215 return iemRaiseGeneralProtectionFault0(pVCpu);
8216 }
8217
8218 default:
8219 AssertFailedReturn(VERR_IEM_IPE_7);
8220 }
8221}
8222
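/*
 * For expand-down data segments the valid offset range is (limit, 0xffff] or
 * (limit, 0xffffffff] depending on the B bit, which is why the check above
 * faults both on GCPtrFirst32 <= limit and on GCPtrLast32 beyond the upper
 * bound. A standalone sketch of that predicate (illustrative only, not
 * compiled into IEM; the Example* name is made up):
 */
#if 0 /* illustrative sketch */
static bool ExampleExpandDownInRange(uint32_t uFirst, uint32_t uLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    /* E.g. limit=0x0fff with B=0: offsets 0x1000..0xffff are valid, 0x0000..0x0fff fault. */
    return uFirst > uLimit && uLast <= uUpperBound;
}
#endif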
8223
8224/**
8225 * Translates a virtual address to a physical address and checks if we
8226 * can access the page as specified.
8227 *
8228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8229 * @param GCPtrMem The virtual address.
8230 * @param fAccess The intended access.
8231 * @param pGCPhysMem Where to return the physical address.
8232 */
8233IEM_STATIC VBOXSTRICTRC
8234iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8235{
8236 /** @todo Need a different PGM interface here. We're currently using
8237 * generic / REM interfaces. This won't cut it for R0 & RC. */
8238 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8239 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8240 RTGCPHYS GCPhys;
8241 uint64_t fFlags;
8242 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8243 if (RT_FAILURE(rc))
8244 {
8245 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8246 /** @todo Check unassigned memory in unpaged mode. */
8247 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8248 *pGCPhysMem = NIL_RTGCPHYS;
8249 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8250 }
8251
8252 /* If the page is writable and does not have the no-exec bit set, all
8253 access is allowed. Otherwise we'll have to check more carefully... */
8254 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8255 {
8256 /* Write to read only memory? */
8257 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8258 && !(fFlags & X86_PTE_RW)
8259 && ( (pVCpu->iem.s.uCpl == 3
8260 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8261 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8262 {
8263 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8264 *pGCPhysMem = NIL_RTGCPHYS;
8265 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8266 }
8267
8268 /* Kernel memory accessed by userland? */
8269 if ( !(fFlags & X86_PTE_US)
8270 && pVCpu->iem.s.uCpl == 3
8271 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8272 {
8273 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8274 *pGCPhysMem = NIL_RTGCPHYS;
8275 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8276 }
8277
8278 /* Executing non-executable memory? */
8279 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8280 && (fFlags & X86_PTE_PAE_NX)
8281 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8282 {
8283 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8284 *pGCPhysMem = NIL_RTGCPHYS;
8285 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8286 VERR_ACCESS_DENIED);
8287 }
8288 }
8289
8290 /*
8291 * Set the dirty / access flags.
8292 * ASSUMES this is set when the address is translated rather than on commit...
8293 */
8294 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8295 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8296 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8297 {
8298 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8299 AssertRC(rc2);
8300 }
8301
8302 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8303 *pGCPhysMem = GCPhys;
8304 return VINF_SUCCESS;
8305}
8306
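/*
 * The write-protection rule above in short: a write to a page without the RW
 * bit faults for CPL 3 accesses (unless flagged as a system access, e.g.
 * descriptor table updates) and for any CPL once CR0.WP is set. A standalone
 * sketch of just that predicate (illustrative only, not compiled into IEM;
 * the Example* name is made up):
 */
#if 0 /* illustrative sketch */
static bool ExamplePageWriteFaults(uint64_t fPteFlags, bool fUserCpl, bool fSysAccess, bool fCr0Wp)
{
    if (fPteFlags & X86_PTE_RW)
        return false; /* writable page: this rule never faults */
    return (fUserCpl && !fSysAccess) || fCr0Wp;
}
#endif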
8307
8308
8309/**
8310 * Maps a physical page.
8311 *
8312 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8314 * @param GCPhysMem The physical address.
8315 * @param fAccess The intended access.
8316 * @param ppvMem Where to return the mapping address.
8317 * @param pLock The PGM lock.
8318 */
8319IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8320{
8321#ifdef IEM_LOG_MEMORY_WRITES
8322 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8323 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8324#endif
8325
8326 /** @todo This API may require some improvement later. A private deal with PGM
8327 * regarding locking and unlocking needs to be struck. A couple of TLBs
8328 * living in PGM, but with publicly accessible inlined access methods
8329 * could perhaps be an even better solution. */
8330 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8331 GCPhysMem,
8332 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8333 pVCpu->iem.s.fBypassHandlers,
8334 ppvMem,
8335 pLock);
8336 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8337 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8338
8339 return rc;
8340}
8341
8342
8343/**
8344 * Unmap a page previously mapped by iemMemPageMap.
8345 *
8346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8347 * @param GCPhysMem The physical address.
8348 * @param fAccess The intended access.
8349 * @param pvMem What iemMemPageMap returned.
8350 * @param pLock The PGM lock.
8351 */
8352DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8353{
8354 NOREF(pVCpu);
8355 NOREF(GCPhysMem);
8356 NOREF(fAccess);
8357 NOREF(pvMem);
8358 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8359}
8360
8361
8362/**
8363 * Looks up a memory mapping entry.
8364 *
8365 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8367 * @param pvMem The memory address.
8368 * @param fAccess The kind of access to look up.
8369 */
8370DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8371{
8372 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8373 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8374 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8375 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8376 return 0;
8377 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8378 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8379 return 1;
8380 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8381 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8382 return 2;
8383 return VERR_NOT_FOUND;
8384}
8385
8386
8387/**
8388 * Finds a free memmap entry when using iNextMapping doesn't work.
8389 *
8390 * @returns Memory mapping index, 1024 on failure.
8391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8392 */
8393IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8394{
8395 /*
8396 * The easy case.
8397 */
8398 if (pVCpu->iem.s.cActiveMappings == 0)
8399 {
8400 pVCpu->iem.s.iNextMapping = 1;
8401 return 0;
8402 }
8403
8404 /* There should be enough mappings for all instructions. */
8405 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8406
8407 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8408 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8409 return i;
8410
8411 AssertFailedReturn(1024);
8412}
8413
8414
8415/**
8416 * Commits a bounce buffer that needs writing back and unmaps it.
8417 *
8418 * @returns Strict VBox status code.
8419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8420 * @param iMemMap The index of the buffer to commit.
8421 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8422 * Always false in ring-3, obviously.
8423 */
8424IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8425{
8426 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8427 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8428#ifdef IN_RING3
8429 Assert(!fPostponeFail);
8430 RT_NOREF_PV(fPostponeFail);
8431#endif
8432
8433 /*
8434 * Do the writing.
8435 */
8436 PVM pVM = pVCpu->CTX_SUFF(pVM);
8437 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8438 {
8439 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8440 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8441 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8442 if (!pVCpu->iem.s.fBypassHandlers)
8443 {
8444 /*
8445 * Carefully and efficiently dealing with access handler return
8446 * codes makes this a little bloated.
8447 */
8448 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8450 pbBuf,
8451 cbFirst,
8452 PGMACCESSORIGIN_IEM);
8453 if (rcStrict == VINF_SUCCESS)
8454 {
8455 if (cbSecond)
8456 {
8457 rcStrict = PGMPhysWrite(pVM,
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8459 pbBuf + cbFirst,
8460 cbSecond,
8461 PGMACCESSORIGIN_IEM);
8462 if (rcStrict == VINF_SUCCESS)
8463 { /* nothing */ }
8464 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8465 {
8466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8469 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8470 }
8471#ifndef IN_RING3
8472 else if (fPostponeFail)
8473 {
8474 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8477 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8478 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8479 return iemSetPassUpStatus(pVCpu, rcStrict);
8480 }
8481#endif
8482 else
8483 {
8484 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8487 return rcStrict;
8488 }
8489 }
8490 }
8491 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8492 {
8493 if (!cbSecond)
8494 {
8495 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8496 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8497 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8498 }
8499 else
8500 {
8501 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8503 pbBuf + cbFirst,
8504 cbSecond,
8505 PGMACCESSORIGIN_IEM);
8506 if (rcStrict2 == VINF_SUCCESS)
8507 {
8508 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8509 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8510 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8511 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8512 }
8513 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8514 {
8515 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8518 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8519 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8520 }
8521#ifndef IN_RING3
8522 else if (fPostponeFail)
8523 {
8524 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8527 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8528 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8529 return iemSetPassUpStatus(pVCpu, rcStrict);
8530 }
8531#endif
8532 else
8533 {
8534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8537 return rcStrict2;
8538 }
8539 }
8540 }
8541#ifndef IN_RING3
8542 else if (fPostponeFail)
8543 {
8544 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8547 if (!cbSecond)
8548 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8549 else
8550 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8551 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8552 return iemSetPassUpStatus(pVCpu, rcStrict);
8553 }
8554#endif
8555 else
8556 {
8557 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8558 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8559 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8560 return rcStrict;
8561 }
8562 }
8563 else
8564 {
8565 /*
8566 * No access handlers, much simpler.
8567 */
8568 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8569 if (RT_SUCCESS(rc))
8570 {
8571 if (cbSecond)
8572 {
8573 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8574 if (RT_SUCCESS(rc))
8575 { /* likely */ }
8576 else
8577 {
8578 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8579 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8580 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8581 return rc;
8582 }
8583 }
8584 }
8585 else
8586 {
8587 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8588 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8590 return rc;
8591 }
8592 }
8593 }
8594
8595#if defined(IEM_LOG_MEMORY_WRITES)
8596 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8597 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8598 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8599 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8600 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8601 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8602
8603 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8604 g_cbIemWrote = cbWrote;
8605 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8606#endif
8607
8608 /*
8609 * Free the mapping entry.
8610 */
8611 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8612 Assert(pVCpu->iem.s.cActiveMappings != 0);
8613 pVCpu->iem.s.cActiveMappings--;
8614 return VINF_SUCCESS;
8615}
8616
8617
8618/**
8619 * iemMemMap worker that deals with a request crossing pages.
8620 */
8621IEM_STATIC VBOXSTRICTRC
8622iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8623{
8624 /*
8625 * Do the address translations.
8626 */
8627 RTGCPHYS GCPhysFirst;
8628 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8629 if (rcStrict != VINF_SUCCESS)
8630 return rcStrict;
8631
8632 RTGCPHYS GCPhysSecond;
8633 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8634 fAccess, &GCPhysSecond);
8635 if (rcStrict != VINF_SUCCESS)
8636 return rcStrict;
8637 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8638
8639 PVM pVM = pVCpu->CTX_SUFF(pVM);
8640
8641 /*
8642 * Read in the current memory content if it's a read, execute or partial
8643 * write access.
8644 */
8645 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8646 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8647 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8648
8649 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8650 {
8651 if (!pVCpu->iem.s.fBypassHandlers)
8652 {
8653 /*
8654 * Must carefully deal with access handler status codes here,
8655 * which makes the code a bit bloated.
8656 */
8657 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8658 if (rcStrict == VINF_SUCCESS)
8659 {
8660 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8661 if (rcStrict == VINF_SUCCESS)
8662 { /*likely */ }
8663 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8664 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8665 else
8666 {
8667 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8668 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8669 return rcStrict;
8670 }
8671 }
8672 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8673 {
8674 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8675 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8676 {
8677 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8678 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8679 }
8680 else
8681 {
8682 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8683 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8684 return rcStrict2;
8685 }
8686 }
8687 else
8688 {
8689 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8690 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8691 return rcStrict;
8692 }
8693 }
8694 else
8695 {
8696 /*
8697 * No informational status codes here, much more straightforward.
8698 */
8699 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8700 if (RT_SUCCESS(rc))
8701 {
8702 Assert(rc == VINF_SUCCESS);
8703 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8704 if (RT_SUCCESS(rc))
8705 Assert(rc == VINF_SUCCESS);
8706 else
8707 {
8708 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8709 return rc;
8710 }
8711 }
8712 else
8713 {
8714 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8715 return rc;
8716 }
8717 }
8718 }
8719#ifdef VBOX_STRICT
8720 else
8721 memset(pbBuf, 0xcc, cbMem);
8722 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8723 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8724#endif
8725
8726 /*
8727 * Commit the bounce buffer entry.
8728 */
8729 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8730 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8731 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8734 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8735 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8736 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8737 pVCpu->iem.s.cActiveMappings++;
8738
8739 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8740 *ppvMem = pbBuf;
8741 return VINF_SUCCESS;
8742}
8743
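/*
 * The split above is purely offset based: the first chunk runs from the access
 * address to the end of its page and the remainder starts at the top of the
 * following page. A standalone sketch of that arithmetic (illustrative only,
 * not compiled into IEM; the Example* name is made up), valid when the access
 * really does cross a page like the caller guarantees:
 */
#if 0 /* illustrative sketch */
static void ExampleCrossPageSplit(RTGCPTR GCPtrFirst, size_t cbMem, size_t *pcbFirstPage, size_t *pcbSecondPage)
{
    /* E.g. GCPtrFirst=0x1ffe and cbMem=8 with 4 KiB pages gives a 2 + 6 byte split. */
    *pcbFirstPage  = PAGE_SIZE - (GCPtrFirst & PAGE_OFFSET_MASK);
    *pcbSecondPage = cbMem - *pcbFirstPage;
}
#endif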
8744
8745/**
8746 * iemMemMap worker that deals with iemMemPageMap failures.
8747 */
8748IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8749 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8750{
8751 /*
8752 * Filter out conditions we can handle and the ones which shouldn't happen.
8753 */
8754 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8755 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8756 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8757 {
8758 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8759 return rcMap;
8760 }
8761 pVCpu->iem.s.cPotentialExits++;
8762
8763 /*
8764 * Read in the current memory content if it's a read, execute or partial
8765 * write access.
8766 */
8767 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8768 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8769 {
8770 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8771 memset(pbBuf, 0xff, cbMem);
8772 else
8773 {
8774 int rc;
8775 if (!pVCpu->iem.s.fBypassHandlers)
8776 {
8777 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8778 if (rcStrict == VINF_SUCCESS)
8779 { /* nothing */ }
8780 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8781 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8782 else
8783 {
8784 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8785 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8786 return rcStrict;
8787 }
8788 }
8789 else
8790 {
8791 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8792 if (RT_SUCCESS(rc))
8793 { /* likely */ }
8794 else
8795 {
8796 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8797 GCPhysFirst, rc));
8798 return rc;
8799 }
8800 }
8801 }
8802 }
8803#ifdef VBOX_STRICT
8804 else
8805 memset(pbBuf, 0xcc, cbMem);
8808 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8809 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8810#endif
8811
8812 /*
8813 * Commit the bounce buffer entry.
8814 */
8815 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8816 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8817 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8818 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8819 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8820 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8821 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8822 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8823 pVCpu->iem.s.cActiveMappings++;
8824
8825 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8826 *ppvMem = pbBuf;
8827 return VINF_SUCCESS;
8828}
8829
8830
8831
8832/**
8833 * Maps the specified guest memory for the given kind of access.
8834 *
8835 * This may be using bounce buffering of the memory if it's crossing a page
8836 * boundary or if there is an access handler installed for any of it. Because
8837 * of lock prefix guarantees, we're in for some extra clutter when this
8838 * happens.
8839 *
8840 * This may raise a \#GP, \#SS, \#PF or \#AC.
8841 *
8842 * @returns VBox strict status code.
8843 *
8844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8845 * @param ppvMem Where to return the pointer to the mapped
8846 * memory.
8847 * @param cbMem The number of bytes to map. This is usually 1,
8848 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8849 * string operations it can be up to a page.
8850 * @param iSegReg The index of the segment register to use for
8851 * this access. The base and limits are checked.
8852 * Use UINT8_MAX to indicate that no segmentation
8853 * is required (for IDT, GDT and LDT accesses).
8854 * @param GCPtrMem The address of the guest memory.
8855 * @param fAccess How the memory is being accessed. The
8856 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8857 * how to map the memory, while the
8858 * IEM_ACCESS_WHAT_XXX bit is used when raising
8859 * exceptions.
8860 */
8861IEM_STATIC VBOXSTRICTRC
8862iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8863{
8864 /*
8865 * Check the input and figure out which mapping entry to use.
8866 */
8867 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8868 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8869 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8870
8871 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8872 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8873 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8874 {
8875 iMemMap = iemMemMapFindFree(pVCpu);
8876 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8877 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8878 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8879 pVCpu->iem.s.aMemMappings[2].fAccess),
8880 VERR_IEM_IPE_9);
8881 }
8882
8883 /*
8884 * Map the memory, checking that we can actually access it. If something
8885 * slightly complicated happens, fall back on bounce buffering.
8886 */
8887 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8888 if (rcStrict != VINF_SUCCESS)
8889 return rcStrict;
8890
8891 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8892 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8893
8894 RTGCPHYS GCPhysFirst;
8895 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8896 if (rcStrict != VINF_SUCCESS)
8897 return rcStrict;
8898
8899 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8900 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8901 if (fAccess & IEM_ACCESS_TYPE_READ)
8902 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8903
8904 void *pvMem;
8905 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8906 if (rcStrict != VINF_SUCCESS)
8907 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8908
8909 /*
8910 * Fill in the mapping table entry.
8911 */
8912 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8913 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8914 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8915 pVCpu->iem.s.cActiveMappings++;
8916
8917 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8918 *ppvMem = pvMem;
8919
8920 return VINF_SUCCESS;
8921}
8922
8923
8924/**
8925 * Commits the guest memory if bounce buffered and unmaps it.
8926 *
8927 * @returns Strict VBox status code.
8928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8929 * @param pvMem The mapping.
8930 * @param fAccess The kind of access.
8931 */
8932IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8933{
8934 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8935 AssertReturn(iMemMap >= 0, iMemMap);
8936
8937 /* If it's bounce buffered, we may need to write back the buffer. */
8938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8939 {
8940 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8941 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8942 }
8943 /* Otherwise unlock it. */
8944 else
8945 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8946
8947 /* Free the entry. */
8948 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8949 Assert(pVCpu->iem.s.cActiveMappings != 0);
8950 pVCpu->iem.s.cActiveMappings--;
8951 return VINF_SUCCESS;
8952}
8953
8954#ifdef IEM_WITH_SETJMP
8955
8956/**
8957 * Maps the specified guest memory for the given kind of access, longjmp on
8958 * error.
8959 *
8960 * This may be using bounce buffering of the memory if it's crossing a page
8961 * boundary or if there is an access handler installed for any of it. Because
8962 * of lock prefix guarantees, we're in for some extra clutter when this
8963 * happens.
8964 *
8965 * This may raise a \#GP, \#SS, \#PF or \#AC.
8966 *
8967 * @returns Pointer to the mapped memory.
8968 *
8969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8970 * @param cbMem The number of bytes to map. This is usually 1,
8971 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8972 * string operations it can be up to a page.
8973 * @param iSegReg The index of the segment register to use for
8974 * this access. The base and limits are checked.
8975 * Use UINT8_MAX to indicate that no segmentation
8976 * is required (for IDT, GDT and LDT accesses).
8977 * @param GCPtrMem The address of the guest memory.
8978 * @param fAccess How the memory is being accessed. The
8979 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8980 * how to map the memory, while the
8981 * IEM_ACCESS_WHAT_XXX bit is used when raising
8982 * exceptions.
8983 */
8984IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8985{
8986 /*
8987 * Check the input and figure out which mapping entry to use.
8988 */
8989 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8990 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8991 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8992
8993 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8994 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8995 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8996 {
8997 iMemMap = iemMemMapFindFree(pVCpu);
8998 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8999 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9000 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9001 pVCpu->iem.s.aMemMappings[2].fAccess),
9002 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9003 }
9004
9005 /*
9006 * Map the memory, checking that we can actually access it. If something
9007 * slightly complicated happens, fall back on bounce buffering.
9008 */
9009 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9010 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9011 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9012
9013 /* Crossing a page boundary? */
9014 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9015 { /* No (likely). */ }
9016 else
9017 {
9018 void *pvMem;
9019 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9020 if (rcStrict == VINF_SUCCESS)
9021 return pvMem;
9022 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9023 }
9024
9025 RTGCPHYS GCPhysFirst;
9026 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9027 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9028 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9029
9030 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9031 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9032 if (fAccess & IEM_ACCESS_TYPE_READ)
9033 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9034
9035 void *pvMem;
9036 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9037 if (rcStrict == VINF_SUCCESS)
9038 { /* likely */ }
9039 else
9040 {
9041 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9042 if (rcStrict == VINF_SUCCESS)
9043 return pvMem;
9044 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9045 }
9046
9047 /*
9048 * Fill in the mapping table entry.
9049 */
9050 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9051 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9052 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9053 pVCpu->iem.s.cActiveMappings++;
9054
9055 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9056 return pvMem;
9057}
9058
9059
9060/**
9061 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9062 *
9063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9064 * @param pvMem The mapping.
9065 * @param fAccess The kind of access.
9066 */
9067IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9068{
9069 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9070 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9071
9072 /* If it's bounce buffered, we may need to write back the buffer. */
9073 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9074 {
9075 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9076 {
9077 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9078 if (rcStrict == VINF_SUCCESS)
9079 return;
9080 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9081 }
9082 }
9083 /* Otherwise unlock it. */
9084 else
9085 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9086
9087 /* Free the entry. */
9088 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9089 Assert(pVCpu->iem.s.cActiveMappings != 0);
9090 pVCpu->iem.s.cActiveMappings--;
9091}
9092
9093#endif /* IEM_WITH_SETJMP */
9094
9095#ifndef IN_RING3
9096/**
9097 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9098 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9099 *
9100 * Allows the instruction to be completed and retired, while the IEM user will
9101 * return to ring-3 immediately afterwards and do the postponed writes there.
9102 *
9103 * @returns VBox status code (no strict statuses). Caller must check
9104 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9106 * @param pvMem The mapping.
9107 * @param fAccess The kind of access.
9108 */
9109IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9110{
9111 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9112 AssertReturn(iMemMap >= 0, iMemMap);
9113
9114 /* If it's bounce buffered, we may need to write back the buffer. */
9115 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9116 {
9117 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9118 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9119 }
9120 /* Otherwise unlock it. */
9121 else
9122 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9123
9124 /* Free the entry. */
9125 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9126 Assert(pVCpu->iem.s.cActiveMappings != 0);
9127 pVCpu->iem.s.cActiveMappings--;
9128 return VINF_SUCCESS;
9129}
9130#endif
9131
9132
9133/**
9134 * Rolls back mappings, releasing page locks and such.
9135 *
9136 * The caller shall only call this after checking cActiveMappings.
9137 *
9139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9140 */
9141IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9142{
9143 Assert(pVCpu->iem.s.cActiveMappings > 0);
9144
9145 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9146 while (iMemMap-- > 0)
9147 {
9148 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9149 if (fAccess != IEM_ACCESS_INVALID)
9150 {
9151 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9152 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9153 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9154 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9155 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9156 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9157 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9159 pVCpu->iem.s.cActiveMappings--;
9160 }
9161 }
9162}
9163
9164
9165/**
9166 * Fetches a data byte.
9167 *
9168 * @returns Strict VBox status code.
9169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9170 * @param pu8Dst Where to return the byte.
9171 * @param iSegReg The index of the segment register to use for
9172 * this access. The base and limits are checked.
9173 * @param GCPtrMem The address of the guest memory.
9174 */
9175IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9176{
9177 /* The lazy approach for now... */
9178 uint8_t const *pu8Src;
9179 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9180 if (rc == VINF_SUCCESS)
9181 {
9182 *pu8Dst = *pu8Src;
9183 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9184 }
9185 return rc;
9186}
9187
9188
9189#ifdef IEM_WITH_SETJMP
9190/**
9191 * Fetches a data byte, longjmp on error.
9192 *
9193 * @returns The byte.
9194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9195 * @param iSegReg The index of the segment register to use for
9196 * this access. The base and limits are checked.
9197 * @param GCPtrMem The address of the guest memory.
9198 */
9199DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9200{
9201 /* The lazy approach for now... */
9202 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9203 uint8_t const bRet = *pu8Src;
9204 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9205 return bRet;
9206}
9207#endif /* IEM_WITH_SETJMP */
9208
9209
9210/**
9211 * Fetches a data word.
9212 *
9213 * @returns Strict VBox status code.
9214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9215 * @param pu16Dst Where to return the word.
9216 * @param iSegReg The index of the segment register to use for
9217 * this access. The base and limits are checked.
9218 * @param GCPtrMem The address of the guest memory.
9219 */
9220IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9221{
9222 /* The lazy approach for now... */
9223 uint16_t const *pu16Src;
9224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9225 if (rc == VINF_SUCCESS)
9226 {
9227 *pu16Dst = *pu16Src;
9228 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9229 }
9230 return rc;
9231}
9232
9233
9234#ifdef IEM_WITH_SETJMP
9235/**
9236 * Fetches a data word, longjmp on error.
9237 *
9238 * @returns The word
9239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9240 * @param iSegReg The index of the segment register to use for
9241 * this access. The base and limits are checked.
9242 * @param GCPtrMem The address of the guest memory.
9243 */
9244DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9245{
9246 /* The lazy approach for now... */
9247 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9248 uint16_t const u16Ret = *pu16Src;
9249 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9250 return u16Ret;
9251}
9252#endif
9253
9254
9255/**
9256 * Fetches a data dword.
9257 *
9258 * @returns Strict VBox status code.
9259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9260 * @param pu32Dst Where to return the dword.
9261 * @param iSegReg The index of the segment register to use for
9262 * this access. The base and limits are checked.
9263 * @param GCPtrMem The address of the guest memory.
9264 */
9265IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9266{
9267 /* The lazy approach for now... */
9268 uint32_t const *pu32Src;
9269 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9270 if (rc == VINF_SUCCESS)
9271 {
9272 *pu32Dst = *pu32Src;
9273 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9274 }
9275 return rc;
9276}
9277
9278
9279#ifdef IEM_WITH_SETJMP
9280
9281IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9282{
9283 Assert(cbMem >= 1);
9284 Assert(iSegReg < X86_SREG_COUNT);
9285
9286 /*
9287 * 64-bit mode is simpler.
9288 */
9289 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9290 {
9291 if (iSegReg >= X86_SREG_FS)
9292 {
9293 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9294 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9295 GCPtrMem += pSel->u64Base;
9296 }
9297
9298 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9299 return GCPtrMem;
9300 }
9301 /*
9302 * 16-bit and 32-bit segmentation.
9303 */
9304 else
9305 {
9306 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9307 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9308 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9309 == X86DESCATTR_P /* data, expand up */
9310 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9311 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9312 {
9313 /* expand up */
9314 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9315 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9316 && GCPtrLast32 > (uint32_t)GCPtrMem))
9317 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9318 }
9319 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9320 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9321 {
9322 /* expand down */
9323 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9324 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9325 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9326 && GCPtrLast32 > (uint32_t)GCPtrMem))
9327 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9328 }
9329 else
9330 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9331 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9332 }
9333 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9334}
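
/*
 * Worked example (illustrative sketch only, not used by the emulation): for
 * an expand-down data segment with u32Limit = 0x0fff and D=1, the valid
 * offsets are 0x1000 thru 0xffffffff.  The hypothetical helper below mirrors
 * the expand-down checks above for a single access; the names and values are
 * assumptions picked for illustration.
 */
#if 0
static bool exampleExpandDownCheck(uint32_t offSeg, uint32_t cbAccess, uint32_t u32Limit, bool fDefBig)
{
    uint32_t const offLast = offSeg + cbAccess;   /* first byte past the access, like GCPtrLast32 above */
    return offSeg  > u32Limit                                       /* at or below the limit => fault */
        && offLast <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff))     /* stay within 64KiB unless D=1 */
        && offLast > offSeg;                                        /* reject 32-bit wrap-around */
}
/* exampleExpandDownCheck(0x1000, 4, 0x0fff, true) -> true  (access lies above the limit)
   exampleExpandDownCheck(0x0ffe, 4, 0x0fff, true) -> false (starts at or below the limit) */
#endif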
9335
9336
9337IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9338{
9339 Assert(cbMem >= 1);
9340 Assert(iSegReg < X86_SREG_COUNT);
9341
9342 /*
9343 * 64-bit mode is simpler.
9344 */
9345 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9346 {
9347 if (iSegReg >= X86_SREG_FS)
9348 {
9349 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9350 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9351 GCPtrMem += pSel->u64Base;
9352 }
9353
9354 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9355 return GCPtrMem;
9356 }
9357 /*
9358 * 16-bit and 32-bit segmentation.
9359 */
9360 else
9361 {
9362 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9363 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9364 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9365 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9366 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9367 {
9368 /* expand up */
9369 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9370 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9371 && GCPtrLast32 > (uint32_t)GCPtrMem))
9372 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9373 }
9374 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9375 {
9376 /* expand down */
9377 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9378 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9379 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9380 && GCPtrLast32 > (uint32_t)GCPtrMem))
9381 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9382 }
9383 else
9384 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9385 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9386 }
9387 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9388}
9389
9390
9391/**
9392 * Fetches a data dword, longjmp on error, fallback/safe version.
9393 *
9394 * @returns The dword.
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param iSegReg The index of the segment register to use for
9397 * this access. The base and limits are checked.
9398 * @param GCPtrMem The address of the guest memory.
9399 */
9400IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9401{
9402 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9403 uint32_t const u32Ret = *pu32Src;
9404 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9405 return u32Ret;
9406}
9407
9408
9409/**
9410 * Fetches a data dword, longjmp on error.
9411 *
9412 * @returns The dword.
9413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9414 * @param iSegReg The index of the segment register to use for
9415 * this access. The base and limits are checked.
9416 * @param GCPtrMem The address of the guest memory.
9417 */
9418DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9419{
9420# ifdef IEM_WITH_DATA_TLB
9421 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9422 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9423 {
9424 /// @todo more later.
9425 }
9426
9427 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9428# else
9429 /* The lazy approach. */
9430 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9431 uint32_t const u32Ret = *pu32Src;
9432 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9433 return u32Ret;
9434# endif
9435}
9436#endif
9437
9438
9439#ifdef SOME_UNUSED_FUNCTION
9440/**
9441 * Fetches a data dword and sign extends it to a qword.
9442 *
9443 * @returns Strict VBox status code.
9444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9445 * @param pu64Dst Where to return the sign extended value.
9446 * @param iSegReg The index of the segment register to use for
9447 * this access. The base and limits are checked.
9448 * @param GCPtrMem The address of the guest memory.
9449 */
9450IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9451{
9452 /* The lazy approach for now... */
9453 int32_t const *pi32Src;
9454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9455 if (rc == VINF_SUCCESS)
9456 {
9457 *pu64Dst = *pi32Src;
9458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9459 }
9460#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9461 else
9462 *pu64Dst = 0;
9463#endif
9464 return rc;
9465}
9466#endif
9467
9468
9469/**
9470 * Fetches a data qword.
9471 *
9472 * @returns Strict VBox status code.
9473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9474 * @param pu64Dst Where to return the qword.
9475 * @param iSegReg The index of the segment register to use for
9476 * this access. The base and limits are checked.
9477 * @param GCPtrMem The address of the guest memory.
9478 */
9479IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9480{
9481 /* The lazy approach for now... */
9482 uint64_t const *pu64Src;
9483 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9484 if (rc == VINF_SUCCESS)
9485 {
9486 *pu64Dst = *pu64Src;
9487 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9488 }
9489 return rc;
9490}
9491
9492
9493#ifdef IEM_WITH_SETJMP
9494/**
9495 * Fetches a data qword, longjmp on error.
9496 *
9497 * @returns The qword.
9498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9499 * @param iSegReg The index of the segment register to use for
9500 * this access. The base and limits are checked.
9501 * @param GCPtrMem The address of the guest memory.
9502 */
9503DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9504{
9505 /* The lazy approach for now... */
9506 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9507 uint64_t const u64Ret = *pu64Src;
9508 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9509 return u64Ret;
9510}
9511#endif
9512
9513
9514/**
9515 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9516 *
9517 * @returns Strict VBox status code.
9518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9519 * @param pu64Dst Where to return the qword.
9520 * @param iSegReg The index of the segment register to use for
9521 * this access. The base and limits are checked.
9522 * @param GCPtrMem The address of the guest memory.
9523 */
9524IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9525{
9526 /* The lazy approach for now... */
9527 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9528 if (RT_UNLIKELY(GCPtrMem & 15))
9529 return iemRaiseGeneralProtectionFault0(pVCpu);
9530
9531 uint64_t const *pu64Src;
9532 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9533 if (rc == VINF_SUCCESS)
9534 {
9535 *pu64Dst = *pu64Src;
9536 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9537 }
9538 return rc;
9539}
9540
9541
9542#ifdef IEM_WITH_SETJMP
9543/**
9544 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9545 *
9546 * @returns The qword.
9547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9548 * @param iSegReg The index of the segment register to use for
9549 * this access. The base and limits are checked.
9550 * @param GCPtrMem The address of the guest memory.
9551 */
9552DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9553{
9554 /* The lazy approach for now... */
9555 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9556 if (RT_LIKELY(!(GCPtrMem & 15)))
9557 {
9558 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9559 uint64_t const u64Ret = *pu64Src;
9560 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9561 return u64Ret;
9562 }
9563
9564 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9565 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9566}
9567#endif
9568
9569
9570/**
9571 * Fetches a data tword.
9572 *
9573 * @returns Strict VBox status code.
9574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9575 * @param pr80Dst Where to return the tword.
9576 * @param iSegReg The index of the segment register to use for
9577 * this access. The base and limits are checked.
9578 * @param GCPtrMem The address of the guest memory.
9579 */
9580IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9581{
9582 /* The lazy approach for now... */
9583 PCRTFLOAT80U pr80Src;
9584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9585 if (rc == VINF_SUCCESS)
9586 {
9587 *pr80Dst = *pr80Src;
9588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9589 }
9590 return rc;
9591}
9592
9593
9594#ifdef IEM_WITH_SETJMP
9595/**
9596 * Fetches a data tword, longjmp on error.
9597 *
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param pr80Dst Where to return the tword.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 */
9604DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9605{
9606 /* The lazy approach for now... */
9607 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9608 *pr80Dst = *pr80Src;
9609 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9610}
9611#endif
9612
9613
9614/**
9615 * Fetches a data dqword (double qword), generally SSE related.
9616 *
9617 * @returns Strict VBox status code.
9618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9619 * @param pu128Dst Where to return the dqword.
9620 * @param iSegReg The index of the segment register to use for
9621 * this access. The base and limits are checked.
9622 * @param GCPtrMem The address of the guest memory.
9623 */
9624IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9625{
9626 /* The lazy approach for now... */
9627 PCRTUINT128U pu128Src;
9628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9629 if (rc == VINF_SUCCESS)
9630 {
9631 pu128Dst->au64[0] = pu128Src->au64[0];
9632 pu128Dst->au64[1] = pu128Src->au64[1];
9633 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9634 }
9635 return rc;
9636}
9637
9638
9639#ifdef IEM_WITH_SETJMP
9640/**
9641 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9642 *
9643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9644 * @param pu128Dst Where to return the dqword.
9645 * @param iSegReg The index of the segment register to use for
9646 * this access. The base and limits are checked.
9647 * @param GCPtrMem The address of the guest memory.
9648 */
9649IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9650{
9651 /* The lazy approach for now... */
9652 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9653 pu128Dst->au64[0] = pu128Src->au64[0];
9654 pu128Dst->au64[1] = pu128Src->au64[1];
9655 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9656}
9657#endif
9658
9659
9660/**
9661 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9662 * related.
9663 *
9664 * Raises \#GP(0) if not aligned.
9665 *
9666 * @returns Strict VBox status code.
9667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9668 * @param pu128Dst Where to return the dqword.
9669 * @param iSegReg The index of the segment register to use for
9670 * this access. The base and limits are checked.
9671 * @param GCPtrMem The address of the guest memory.
9672 */
9673IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9674{
9675 /* The lazy approach for now... */
9676 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9677 if ( (GCPtrMem & 15)
9678 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9679 return iemRaiseGeneralProtectionFault0(pVCpu);
9680
9681 PCRTUINT128U pu128Src;
9682 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9683 if (rc == VINF_SUCCESS)
9684 {
9685 pu128Dst->au64[0] = pu128Src->au64[0];
9686 pu128Dst->au64[1] = pu128Src->au64[1];
9687 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9688 }
9689 return rc;
9690}
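
/*
 * Usage sketch (illustration only): copying 16 aligned bytes from one guest
 * address to another with the aligned SSE fetch/store helpers.  The function
 * name and parameters are made up for the example; both addresses must be
 * 16-byte aligned unless the guest has set MXCSR.MM, otherwise #GP(0) is
 * raised just as described above.
 */
#if 0
static VBOXSTRICTRC exampleCopyAlignedU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrSrc, RTGCPTR GCPtrDst)
{
    RTUINT128U uValue;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uValue, iSegReg, GCPtrSrc);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStoreDataU128AlignedSse(pVCpu, iSegReg, GCPtrDst, uValue); /* defined further down */
    return rcStrict;
}
#endif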
9691
9692
9693#ifdef IEM_WITH_SETJMP
9694/**
9695 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9696 * related, longjmp on error.
9697 *
9698 * Raises \#GP(0) if not aligned.
9699 *
9700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9701 * @param pu128Dst Where to return the dqword.
9702 * @param iSegReg The index of the segment register to use for
9703 * this access. The base and limits are checked.
9704 * @param GCPtrMem The address of the guest memory.
9705 */
9706DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9707{
9708 /* The lazy approach for now... */
9709 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9710 if ( (GCPtrMem & 15) == 0
9711 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9712 {
9713 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9714 pu128Dst->au64[0] = pu128Src->au64[0];
9715 pu128Dst->au64[1] = pu128Src->au64[1];
9716 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9717 return;
9718 }
9719
9720 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9721 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9722}
9723#endif
9724
9725
9726/**
9727 * Fetches a data oword (octo word), generally AVX related.
9728 *
9729 * @returns Strict VBox status code.
9730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9731 * @param pu256Dst Where to return the oword.
9732 * @param iSegReg The index of the segment register to use for
9733 * this access. The base and limits are checked.
9734 * @param GCPtrMem The address of the guest memory.
9735 */
9736IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9737{
9738 /* The lazy approach for now... */
9739 PCRTUINT256U pu256Src;
9740 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9741 if (rc == VINF_SUCCESS)
9742 {
9743 pu256Dst->au64[0] = pu256Src->au64[0];
9744 pu256Dst->au64[1] = pu256Src->au64[1];
9745 pu256Dst->au64[2] = pu256Src->au64[2];
9746 pu256Dst->au64[3] = pu256Src->au64[3];
9747 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9748 }
9749 return rc;
9750}
9751
9752
9753#ifdef IEM_WITH_SETJMP
9754/**
9755 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9756 *
9757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9758 * @param pu256Dst Where to return the oword.
9759 * @param iSegReg The index of the segment register to use for
9760 * this access. The base and limits are checked.
9761 * @param GCPtrMem The address of the guest memory.
9762 */
9763IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9764{
9765 /* The lazy approach for now... */
9766 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9767 pu256Dst->au64[0] = pu256Src->au64[0];
9768 pu256Dst->au64[1] = pu256Src->au64[1];
9769 pu256Dst->au64[2] = pu256Src->au64[2];
9770 pu256Dst->au64[3] = pu256Src->au64[3];
9771 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9772}
9773#endif
9774
9775
9776/**
9777 * Fetches a data oword (octo word) at an aligned address, generally AVX
9778 * related.
9779 *
9780 * Raises \#GP(0) if not aligned.
9781 *
9782 * @returns Strict VBox status code.
9783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9784 * @param pu256Dst Where to return the oword.
9785 * @param iSegReg The index of the segment register to use for
9786 * this access. The base and limits are checked.
9787 * @param GCPtrMem The address of the guest memory.
9788 */
9789IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9790{
9791 /* The lazy approach for now... */
9792 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9793 if (GCPtrMem & 31)
9794 return iemRaiseGeneralProtectionFault0(pVCpu);
9795
9796 PCRTUINT256U pu256Src;
9797 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9798 if (rc == VINF_SUCCESS)
9799 {
9800 pu256Dst->au64[0] = pu256Src->au64[0];
9801 pu256Dst->au64[1] = pu256Src->au64[1];
9802 pu256Dst->au64[2] = pu256Src->au64[2];
9803 pu256Dst->au64[3] = pu256Src->au64[3];
9804 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9805 }
9806 return rc;
9807}
9808
9809
9810#ifdef IEM_WITH_SETJMP
9811/**
9812 * Fetches a data oword (octo word) at an aligned address, generally AVX
9813 * related, longjmp on error.
9814 *
9815 * Raises \#GP(0) if not aligned.
9816 *
9817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9818 * @param pu256Dst Where to return the oword.
9819 * @param iSegReg The index of the segment register to use for
9820 * this access. The base and limits are checked.
9821 * @param GCPtrMem The address of the guest memory.
9822 */
9823DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9824{
9825 /* The lazy approach for now... */
9826 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9827 if ((GCPtrMem & 31) == 0)
9828 {
9829 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9830 pu256Dst->au64[0] = pu256Src->au64[0];
9831 pu256Dst->au64[1] = pu256Src->au64[1];
9832 pu256Dst->au64[2] = pu256Src->au64[2];
9833 pu256Dst->au64[3] = pu256Src->au64[3];
9834 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9835 return;
9836 }
9837
9838 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9839 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9840}
9841#endif
9842
9843
9844
9845/**
9846 * Fetches a descriptor register (lgdt, lidt).
9847 *
9848 * @returns Strict VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9850 * @param pcbLimit Where to return the limit.
9851 * @param pGCPtrBase Where to return the base.
9852 * @param iSegReg The index of the segment register to use for
9853 * this access. The base and limits are checked.
9854 * @param GCPtrMem The address of the guest memory.
9855 * @param enmOpSize The effective operand size.
9856 */
9857IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9858 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9859{
9860 /*
9861 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9862 * little special:
9863 * - The two reads are done separately.
9864 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9865 * - We suspect the 386 to actually commit the limit before the base in
9866 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9867 * don't try to emulate this eccentric behavior, because it's not well
9868 * enough understood and rather hard to trigger.
9869 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9870 */
9871 VBOXSTRICTRC rcStrict;
9872 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9873 {
9874 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9875 if (rcStrict == VINF_SUCCESS)
9876 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9877 }
9878 else
9879 {
9880 uint32_t uTmp = 0; /* (Visual C++ may otherwise warn about it being used uninitialized) */
9881 if (enmOpSize == IEMMODE_32BIT)
9882 {
9883 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9884 {
9885 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9886 if (rcStrict == VINF_SUCCESS)
9887 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9888 }
9889 else
9890 {
9891 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9892 if (rcStrict == VINF_SUCCESS)
9893 {
9894 *pcbLimit = (uint16_t)uTmp;
9895 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9896 }
9897 }
9898 if (rcStrict == VINF_SUCCESS)
9899 *pGCPtrBase = uTmp;
9900 }
9901 else
9902 {
9903 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9904 if (rcStrict == VINF_SUCCESS)
9905 {
9906 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9907 if (rcStrict == VINF_SUCCESS)
9908 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9909 }
9910 }
9911 }
9912 return rcStrict;
9913}
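
/*
 * Usage sketch (illustration only): fetching an LGDT/LIDT operand with a
 * 16-bit operand size.  The function name, the choice of DS and the
 * assumption that the guest is not in 64-bit mode are all made up for the
 * example; the point is that the top byte of the base dword is masked off
 * by the helper above in that case.
 */
#if 0
static VBOXSTRICTRC exampleLgdtOperand16(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, X86_SREG_DS, GCPtrMem, IEMMODE_16BIT);
    if (rcStrict == VINF_SUCCESS)
        Assert(GCPtrBase <= UINT32_C(0x00ffffff)); /* only a 24-bit base with a 16-bit operand size (outside 64-bit mode) */
    return rcStrict;
}
#endif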
9914
9915
9916
9917/**
9918 * Stores a data byte.
9919 *
9920 * @returns Strict VBox status code.
9921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9922 * @param iSegReg The index of the segment register to use for
9923 * this access. The base and limits are checked.
9924 * @param GCPtrMem The address of the guest memory.
9925 * @param u8Value The value to store.
9926 */
9927IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9928{
9929 /* The lazy approach for now... */
9930 uint8_t *pu8Dst;
9931 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9932 if (rc == VINF_SUCCESS)
9933 {
9934 *pu8Dst = u8Value;
9935 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9936 }
9937 return rc;
9938}
9939
9940
9941#ifdef IEM_WITH_SETJMP
9942/**
9943 * Stores a data byte, longjmp on error.
9944 *
9945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9946 * @param iSegReg The index of the segment register to use for
9947 * this access. The base and limits are checked.
9948 * @param GCPtrMem The address of the guest memory.
9949 * @param u8Value The value to store.
9950 */
9951IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9952{
9953 /* The lazy approach for now... */
9954 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9955 *pu8Dst = u8Value;
9956 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9957}
9958#endif
9959
9960
9961/**
9962 * Stores a data word.
9963 *
9964 * @returns Strict VBox status code.
9965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9966 * @param iSegReg The index of the segment register to use for
9967 * this access. The base and limits are checked.
9968 * @param GCPtrMem The address of the guest memory.
9969 * @param u16Value The value to store.
9970 */
9971IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9972{
9973 /* The lazy approach for now... */
9974 uint16_t *pu16Dst;
9975 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9976 if (rc == VINF_SUCCESS)
9977 {
9978 *pu16Dst = u16Value;
9979 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9980 }
9981 return rc;
9982}
9983
9984
9985#ifdef IEM_WITH_SETJMP
9986/**
9987 * Stores a data word, longjmp on error.
9988 *
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param iSegReg The index of the segment register to use for
9991 * this access. The base and limits are checked.
9992 * @param GCPtrMem The address of the guest memory.
9993 * @param u16Value The value to store.
9994 */
9995IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9996{
9997 /* The lazy approach for now... */
9998 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9999 *pu16Dst = u16Value;
10000 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10001}
10002#endif
10003
10004
10005/**
10006 * Stores a data dword.
10007 *
10008 * @returns Strict VBox status code.
10009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10010 * @param iSegReg The index of the segment register to use for
10011 * this access. The base and limits are checked.
10012 * @param GCPtrMem The address of the guest memory.
10013 * @param u32Value The value to store.
10014 */
10015IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10016{
10017 /* The lazy approach for now... */
10018 uint32_t *pu32Dst;
10019 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10020 if (rc == VINF_SUCCESS)
10021 {
10022 *pu32Dst = u32Value;
10023 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10024 }
10025 return rc;
10026}
10027
10028
10029#ifdef IEM_WITH_SETJMP
10030/**
10031 * Stores a data dword, longjmp on error.
10032 *
10034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10035 * @param iSegReg The index of the segment register to use for
10036 * this access. The base and limits are checked.
10037 * @param GCPtrMem The address of the guest memory.
10038 * @param u32Value The value to store.
10039 */
10040IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10041{
10042 /* The lazy approach for now... */
10043 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10044 *pu32Dst = u32Value;
10045 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10046}
10047#endif
10048
10049
10050/**
10051 * Stores a data qword.
10052 *
10053 * @returns Strict VBox status code.
10054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10055 * @param iSegReg The index of the segment register to use for
10056 * this access. The base and limits are checked.
10057 * @param GCPtrMem The address of the guest memory.
10058 * @param u64Value The value to store.
10059 */
10060IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10061{
10062 /* The lazy approach for now... */
10063 uint64_t *pu64Dst;
10064 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10065 if (rc == VINF_SUCCESS)
10066 {
10067 *pu64Dst = u64Value;
10068 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10069 }
10070 return rc;
10071}
10072
10073
10074#ifdef IEM_WITH_SETJMP
10075/**
10076 * Stores a data qword, longjmp on error.
10077 *
10078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10079 * @param iSegReg The index of the segment register to use for
10080 * this access. The base and limits are checked.
10081 * @param GCPtrMem The address of the guest memory.
10082 * @param u64Value The value to store.
10083 */
10084IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10085{
10086 /* The lazy approach for now... */
10087 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10088 *pu64Dst = u64Value;
10089 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10090}
10091#endif
10092
10093
10094/**
10095 * Stores a data dqword.
10096 *
10097 * @returns Strict VBox status code.
10098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10099 * @param iSegReg The index of the segment register to use for
10100 * this access. The base and limits are checked.
10101 * @param GCPtrMem The address of the guest memory.
10102 * @param u128Value The value to store.
10103 */
10104IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10105{
10106 /* The lazy approach for now... */
10107 PRTUINT128U pu128Dst;
10108 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10109 if (rc == VINF_SUCCESS)
10110 {
10111 pu128Dst->au64[0] = u128Value.au64[0];
10112 pu128Dst->au64[1] = u128Value.au64[1];
10113 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10114 }
10115 return rc;
10116}
10117
10118
10119#ifdef IEM_WITH_SETJMP
10120/**
10121 * Stores a data dqword, longjmp on error.
10122 *
10123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10124 * @param iSegReg The index of the segment register to use for
10125 * this access. The base and limits are checked.
10126 * @param GCPtrMem The address of the guest memory.
10127 * @param u128Value The value to store.
10128 */
10129IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10130{
10131 /* The lazy approach for now... */
10132 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10133 pu128Dst->au64[0] = u128Value.au64[0];
10134 pu128Dst->au64[1] = u128Value.au64[1];
10135 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10136}
10137#endif
10138
10139
10140/**
10141 * Stores a data dqword, SSE aligned.
10142 *
10143 * @returns Strict VBox status code.
10144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10145 * @param iSegReg The index of the segment register to use for
10146 * this access. The base and limits are checked.
10147 * @param GCPtrMem The address of the guest memory.
10148 * @param u128Value The value to store.
10149 */
10150IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10151{
10152 /* The lazy approach for now... */
10153 if ( (GCPtrMem & 15)
10154 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10155 return iemRaiseGeneralProtectionFault0(pVCpu);
10156
10157 PRTUINT128U pu128Dst;
10158 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10159 if (rc == VINF_SUCCESS)
10160 {
10161 pu128Dst->au64[0] = u128Value.au64[0];
10162 pu128Dst->au64[1] = u128Value.au64[1];
10163 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10164 }
10165 return rc;
10166}
10167
10168
10169#ifdef IEM_WITH_SETJMP
10170/**
10171 * Stores a data dqword, SSE aligned, longjmp on error.
10172 *
10174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10175 * @param iSegReg The index of the segment register to use for
10176 * this access. The base and limits are checked.
10177 * @param GCPtrMem The address of the guest memory.
10178 * @param u128Value The value to store.
10179 */
10180DECL_NO_INLINE(IEM_STATIC, void)
10181iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10182{
10183 /* The lazy approach for now... */
10184 if ( (GCPtrMem & 15) == 0
10185 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10186 {
10187 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10188 pu128Dst->au64[0] = u128Value.au64[0];
10189 pu128Dst->au64[1] = u128Value.au64[1];
10190 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10191 return;
10192 }
10193
10194 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10195 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10196}
10197#endif
10198
10199
10200/**
10201 * Stores a data oword (octo word).
10202 *
10203 * @returns Strict VBox status code.
10204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10205 * @param iSegReg The index of the segment register to use for
10206 * this access. The base and limits are checked.
10207 * @param GCPtrMem The address of the guest memory.
10208 * @param pu256Value Pointer to the value to store.
10209 */
10210IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10211{
10212 /* The lazy approach for now... */
10213 PRTUINT256U pu256Dst;
10214 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10215 if (rc == VINF_SUCCESS)
10216 {
10217 pu256Dst->au64[0] = pu256Value->au64[0];
10218 pu256Dst->au64[1] = pu256Value->au64[1];
10219 pu256Dst->au64[2] = pu256Value->au64[2];
10220 pu256Dst->au64[3] = pu256Value->au64[3];
10221 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10222 }
10223 return rc;
10224}
10225
10226
10227#ifdef IEM_WITH_SETJMP
10228/**
10229 * Stores a data oword (octo word), longjmp on error.
10230 *
10231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10232 * @param iSegReg The index of the segment register to use for
10233 * this access. The base and limits are checked.
10234 * @param GCPtrMem The address of the guest memory.
10235 * @param pu256Value Pointer to the value to store.
10236 */
10237IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10238{
10239 /* The lazy approach for now... */
10240 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10241 pu256Dst->au64[0] = pu256Value->au64[0];
10242 pu256Dst->au64[1] = pu256Value->au64[1];
10243 pu256Dst->au64[2] = pu256Value->au64[2];
10244 pu256Dst->au64[3] = pu256Value->au64[3];
10245 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10246}
10247#endif
10248
10249
10250/**
10251 * Stores a data oword (octo word), AVX aligned.
10252 *
10253 * @returns Strict VBox status code.
10254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10255 * @param iSegReg The index of the segment register to use for
10256 * this access. The base and limits are checked.
10257 * @param GCPtrMem The address of the guest memory.
10258 * @param pu256Value Pointer to the value to store.
10259 */
10260IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10261{
10262 /* The lazy approach for now... */
10263 if (GCPtrMem & 31)
10264 return iemRaiseGeneralProtectionFault0(pVCpu);
10265
10266 PRTUINT256U pu256Dst;
10267 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10268 if (rc == VINF_SUCCESS)
10269 {
10270 pu256Dst->au64[0] = pu256Value->au64[0];
10271 pu256Dst->au64[1] = pu256Value->au64[1];
10272 pu256Dst->au64[2] = pu256Value->au64[2];
10273 pu256Dst->au64[3] = pu256Value->au64[3];
10274 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10275 }
10276 return rc;
10277}
10278
10279
10280#ifdef IEM_WITH_SETJMP
10281/**
10282 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10283 *
10285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10286 * @param iSegReg The index of the segment register to use for
10287 * this access. The base and limits are checked.
10288 * @param GCPtrMem The address of the guest memory.
10289 * @param pu256Value Pointer to the value to store.
10290 */
10291DECL_NO_INLINE(IEM_STATIC, void)
10292iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10293{
10294 /* The lazy approach for now... */
10295 if ((GCPtrMem & 31) == 0)
10296 {
10297 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10298 pu256Dst->au64[0] = pu256Value->au64[0];
10299 pu256Dst->au64[1] = pu256Value->au64[1];
10300 pu256Dst->au64[2] = pu256Value->au64[2];
10301 pu256Dst->au64[3] = pu256Value->au64[3];
10302 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10303 return;
10304 }
10305
10306 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10307 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10308}
10309#endif
10310
10311
10312/**
10313 * Stores a descriptor register (sgdt, sidt).
10314 *
10315 * @returns Strict VBox status code.
10316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10317 * @param cbLimit The limit.
10318 * @param GCPtrBase The base address.
10319 * @param iSegReg The index of the segment register to use for
10320 * this access. The base and limits are checked.
10321 * @param GCPtrMem The address of the guest memory.
10322 */
10323IEM_STATIC VBOXSTRICTRC
10324iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10325{
10326 /*
10327 * The SIDT and SGDT instructions actually store the data using two
10328 * independent writes. The instructions do not respond to opsize prefixes.
10329 */
10330 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10331 if (rcStrict == VINF_SUCCESS)
10332 {
10333 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10334 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10335 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10336 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10337 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10338 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10339 else
10340 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10341 }
10342 return rcStrict;
10343}
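
/*
 * Worked example (assumed values, illustration only): storing a descriptor
 * table register with limit 0x0027 and base 0x00123456 through the helper
 * above writes the bytes 27 00 56 34 12 00 in 32-bit mode (and in 16-bit
 * mode on 386+ targets), 27 00 56 34 12 ff in 16-bit mode when targeting a
 * 286-class CPU, and 27 00 followed by the full 8-byte base in 64-bit mode.
 */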
10344
10345
10346/**
10347 * Pushes a word onto the stack.
10348 *
10349 * @returns Strict VBox status code.
10350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10351 * @param u16Value The value to push.
10352 */
10353IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10354{
10355 /* Decrement the stack pointer. */
10356 uint64_t uNewRsp;
10357 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10358
10359 /* Write the word the lazy way. */
10360 uint16_t *pu16Dst;
10361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10362 if (rc == VINF_SUCCESS)
10363 {
10364 *pu16Dst = u16Value;
10365 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10366 }
10367
10368 /* Commit the new RSP value unless an access handler made trouble. */
10369 if (rc == VINF_SUCCESS)
10370 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10371
10372 return rc;
10373}
10374
10375
10376/**
10377 * Pushes a dword onto the stack.
10378 *
10379 * @returns Strict VBox status code.
10380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10381 * @param u32Value The value to push.
10382 */
10383IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10384{
10385 /* Decrement the stack pointer. */
10386 uint64_t uNewRsp;
10387 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10388
10389 /* Write the dword the lazy way. */
10390 uint32_t *pu32Dst;
10391 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10392 if (rc == VINF_SUCCESS)
10393 {
10394 *pu32Dst = u32Value;
10395 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10396 }
10397
10398 /* Commit the new RSP value unless an access handler made trouble. */
10399 if (rc == VINF_SUCCESS)
10400 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10401
10402 return rc;
10403}
10404
10405
10406/**
10407 * Pushes a dword segment register value onto the stack.
10408 *
10409 * @returns Strict VBox status code.
10410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10411 * @param u32Value The value to push.
10412 */
10413IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10414{
10415 /* Decrement the stack pointer. */
10416 uint64_t uNewRsp;
10417 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10418
10419 /* The Intel docs talk about zero extending the selector register
10420 value. My actual intel CPU here might be zero extending the value
10421 but it still only writes the lower word... */
10422 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10423 * happens when crossing an electric page boundary: is the high word checked
10424 * for write accessibility or not? Probably it is. What about segment limits?
10425 * It appears this behavior is also shared with trap error codes.
10426 *
10427 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10428 * ancient hardware when it actually did change. */
10429 uint16_t *pu16Dst;
10430 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10431 if (rc == VINF_SUCCESS)
10432 {
10433 *pu16Dst = (uint16_t)u32Value;
10434 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10435 }
10436
10437 /* Commit the new RSP value unless an access handler made trouble. */
10438 if (rc == VINF_SUCCESS)
10439 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10440
10441 return rc;
10442}
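
/*
 * Illustration (assumed values): with ESP=0x1000 and FS=0x0033, a 32-bit
 * "push fs" emulated via the helper above ends with ESP=0x0ffc, the word at
 * 0x0ffc set to 0x0033, and the two bytes at 0x0ffe left untouched, matching
 * the hardware behaviour described in the comment above.
 */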
10443
10444
10445/**
10446 * Pushes a qword onto the stack.
10447 *
10448 * @returns Strict VBox status code.
10449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10450 * @param u64Value The value to push.
10451 */
10452IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10453{
10454 /* Decrement the stack pointer. */
10455 uint64_t uNewRsp;
10456 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10457
10458 /* Write the qword the lazy way. */
10459 uint64_t *pu64Dst;
10460 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10461 if (rc == VINF_SUCCESS)
10462 {
10463 *pu64Dst = u64Value;
10464 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10465 }
10466
10467 /* Commit the new RSP value unless an access handler made trouble. */
10468 if (rc == VINF_SUCCESS)
10469 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10470
10471 return rc;
10472}
10473
10474
10475/**
10476 * Pops a word from the stack.
10477 *
10478 * @returns Strict VBox status code.
10479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10480 * @param pu16Value Where to store the popped value.
10481 */
10482IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10483{
10484 /* Increment the stack pointer. */
10485 uint64_t uNewRsp;
10486 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10487
10488 /* Read the word the lazy way. */
10489 uint16_t const *pu16Src;
10490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10491 if (rc == VINF_SUCCESS)
10492 {
10493 *pu16Value = *pu16Src;
10494 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10495
10496 /* Commit the new RSP value. */
10497 if (rc == VINF_SUCCESS)
10498 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10499 }
10500
10501 return rc;
10502}
10503
10504
10505/**
10506 * Pops a dword from the stack.
10507 *
10508 * @returns Strict VBox status code.
10509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10510 * @param pu32Value Where to store the popped value.
10511 */
10512IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10513{
10514 /* Increment the stack pointer. */
10515 uint64_t uNewRsp;
10516 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10517
10518 /* Read the dword the lazy way. */
10519 uint32_t const *pu32Src;
10520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10521 if (rc == VINF_SUCCESS)
10522 {
10523 *pu32Value = *pu32Src;
10524 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10525
10526 /* Commit the new RSP value. */
10527 if (rc == VINF_SUCCESS)
10528 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10529 }
10530
10531 return rc;
10532}
10533
10534
10535/**
10536 * Pops a qword from the stack.
10537 *
10538 * @returns Strict VBox status code.
10539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10540 * @param pu64Value Where to store the popped value.
10541 */
10542IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10543{
10544 /* Increment the stack pointer. */
10545 uint64_t uNewRsp;
10546 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10547
10548 /* Read the qword the lazy way. */
10549 uint64_t const *pu64Src;
10550 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10551 if (rc == VINF_SUCCESS)
10552 {
10553 *pu64Value = *pu64Src;
10554 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10555
10556 /* Commit the new RSP value. */
10557 if (rc == VINF_SUCCESS)
10558 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10559 }
10560
10561 return rc;
10562}
10563
10564
10565/**
10566 * Pushes a word onto the stack, using a temporary stack pointer.
10567 *
10568 * @returns Strict VBox status code.
10569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10570 * @param u16Value The value to push.
10571 * @param pTmpRsp Pointer to the temporary stack pointer.
10572 */
10573IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10574{
10575 /* Decrement the stack pointer. */
10576 RTUINT64U NewRsp = *pTmpRsp;
10577 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10578
10579 /* Write the word the lazy way. */
10580 uint16_t *pu16Dst;
10581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10582 if (rc == VINF_SUCCESS)
10583 {
10584 *pu16Dst = u16Value;
10585 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10586 }
10587
10588 /* Commit the new RSP value unless an access handler made trouble. */
10589 if (rc == VINF_SUCCESS)
10590 *pTmpRsp = NewRsp;
10591
10592 return rc;
10593}
10594
10595
10596/**
10597 * Pushes a dword onto the stack, using a temporary stack pointer.
10598 *
10599 * @returns Strict VBox status code.
10600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10601 * @param u32Value The value to push.
10602 * @param pTmpRsp Pointer to the temporary stack pointer.
10603 */
10604IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10605{
10606 /* Decrement the stack pointer. */
10607 RTUINT64U NewRsp = *pTmpRsp;
10608 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10609
10610 /* Write the dword the lazy way. */
10611 uint32_t *pu32Dst;
10612 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10613 if (rc == VINF_SUCCESS)
10614 {
10615 *pu32Dst = u32Value;
10616 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10617 }
10618
10619 /* Commit the new RSP value unless an access handler made trouble. */
10620 if (rc == VINF_SUCCESS)
10621 *pTmpRsp = NewRsp;
10622
10623 return rc;
10624}
10625
10626
10627/**
10628 * Pushes a qword onto the stack, using a temporary stack pointer.
10629 *
10630 * @returns Strict VBox status code.
10631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10632 * @param u64Value The value to push.
10633 * @param pTmpRsp Pointer to the temporary stack pointer.
10634 */
10635IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10636{
10637 /* Decrement the stack pointer. */
10638 RTUINT64U NewRsp = *pTmpRsp;
10639 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10640
10641 /* Write the qword the lazy way. */
10642 uint64_t *pu64Dst;
10643 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10644 if (rc == VINF_SUCCESS)
10645 {
10646 *pu64Dst = u64Value;
10647 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10648 }
10649
10650 /* Commit the new RSP value unless an access handler made trouble. */
10651 if (rc == VINF_SUCCESS)
10652 *pTmpRsp = NewRsp;
10653
10654 return rc;
10655}
10656
10657
10658/**
10659 * Pops a word from the stack, using a temporary stack pointer.
10660 *
10661 * @returns Strict VBox status code.
10662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10663 * @param pu16Value Where to store the popped value.
10664 * @param pTmpRsp Pointer to the temporary stack pointer.
10665 */
10666IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10667{
10668 /* Increment the stack pointer. */
10669 RTUINT64U NewRsp = *pTmpRsp;
10670 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10671
10672 /* Read the word the lazy way. */
10673 uint16_t const *pu16Src;
10674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10675 if (rc == VINF_SUCCESS)
10676 {
10677 *pu16Value = *pu16Src;
10678 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10679
10680 /* Commit the new RSP value. */
10681 if (rc == VINF_SUCCESS)
10682 *pTmpRsp = NewRsp;
10683 }
10684
10685 return rc;
10686}
10687
10688
10689/**
10690 * Pops a dword from the stack, using a temporary stack pointer.
10691 *
10692 * @returns Strict VBox status code.
10693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10694 * @param pu32Value Where to store the popped value.
10695 * @param pTmpRsp Pointer to the temporary stack pointer.
10696 */
10697IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10698{
10699 /* Increment the stack pointer. */
10700 RTUINT64U NewRsp = *pTmpRsp;
10701 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10702
10703 /* Read the dword the lazy way. */
10704 uint32_t const *pu32Src;
10705 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10706 if (rc == VINF_SUCCESS)
10707 {
10708 *pu32Value = *pu32Src;
10709 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10710
10711 /* Commit the new RSP value. */
10712 if (rc == VINF_SUCCESS)
10713 *pTmpRsp = NewRsp;
10714 }
10715
10716 return rc;
10717}
10718
10719
10720/**
10721 * Pops a qword from the stack, using a temporary stack pointer.
10722 *
10723 * @returns Strict VBox status code.
10724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10725 * @param pu64Value Where to store the popped value.
10726 * @param pTmpRsp Pointer to the temporary stack pointer.
10727 */
10728IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10729{
10730 /* Increment the stack pointer. */
10731 RTUINT64U NewRsp = *pTmpRsp;
10732 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10733
10734 /* Read the qword the lazy way. */
10735 uint64_t const *pu64Src;
10736 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10737 if (rcStrict == VINF_SUCCESS)
10738 {
10739 *pu64Value = *pu64Src;
10740 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10741
10742 /* Commit the new RSP value. */
10743 if (rcStrict == VINF_SUCCESS)
10744 *pTmpRsp = NewRsp;
10745 }
10746
10747 return rcStrict;
10748}
10749
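/*
 * Illustrative usage sketch (hypothetical caller, not compiled): the *Ex
 * push/pop variants operate on a caller-supplied RTUINT64U stack pointer, so
 * a multi-part operation can simply skip the final RSP commit if any step
 * fails.  The function name and the two-word layout are assumptions made for
 * this example only.
 */
#if 0
static VBOXSTRICTRC iemExamplePopTwoWords(PVMCPU pVCpu, uint16_t *puFirst, uint16_t *puSecond)
{
    /* Work on a temporary copy of RSP so nothing changes if the second pop faults. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, puFirst, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, puSecond, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u; /* Commit only after both pops succeeded. */
    return rcStrict;
}
#endif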
10750
10751/**
10752 * Begin a special stack push (used by interrupts, exceptions and such).
10753 *
10754 * This will raise \#SS or \#PF if appropriate.
10755 *
10756 * @returns Strict VBox status code.
10757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10758 * @param cbMem The number of bytes to push onto the stack.
10759 * @param ppvMem Where to return the pointer to the stack memory.
10760 * As with the other memory functions this could be
10761 * direct access or bounce buffered access, so
10762 * don't commit any register values until the commit call
10763 * succeeds.
10764 * @param puNewRsp Where to return the new RSP value. This must be
10765 * passed unchanged to
10766 * iemMemStackPushCommitSpecial().
10767 */
10768IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10769{
10770 Assert(cbMem < UINT8_MAX);
10771 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10772 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10773}
10774
10775
10776/**
10777 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10778 *
10779 * This will update the rSP.
10780 *
10781 * @returns Strict VBox status code.
10782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10783 * @param pvMem The pointer returned by
10784 * iemMemStackPushBeginSpecial().
10785 * @param uNewRsp The new RSP value returned by
10786 * iemMemStackPushBeginSpecial().
10787 */
10788IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10789{
10790 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10791 if (rcStrict == VINF_SUCCESS)
10792 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10793 return rcStrict;
10794}
10795
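/*
 * Illustrative usage sketch (hypothetical caller, not compiled): the special
 * push is split into a begin/commit pair so the mapped stack bytes can be
 * filled in before anything, including RSP, becomes architecturally visible.
 * The frame layout and the function name are assumptions for illustration.
 */
#if 0
static VBOXSTRICTRC iemExamplePushU16Pair(PVMCPU pVCpu, uint16_t uHigh, uint16_t uLow)
{
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                /* propagates the #SS/#PF status raised by the mapping code */

    pau16Frame[0] = uLow;               /* lowest address = pushed last */
    pau16Frame[1] = uHigh;

    /* Commits (unmaps) the bytes and only on success updates RSP. */
    return iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp);
}
#endif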
10796
10797/**
10798 * Begin a special stack pop (used by iret, retf and such).
10799 *
10800 * This will raise \#SS or \#PF if appropriate.
10801 *
10802 * @returns Strict VBox status code.
10803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10804 * @param cbMem The number of bytes to pop from the stack.
10805 * @param ppvMem Where to return the pointer to the stack memory.
10806 * @param puNewRsp Where to return the new RSP value. This must be
10807 * assigned to CPUMCTX::rsp manually some time
10808 * after iemMemStackPopDoneSpecial() has been
10809 * called.
10810 */
10811IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10812{
10813 Assert(cbMem < UINT8_MAX);
10814 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10815 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10816}
10817
10818
10819/**
10820 * Continue a special stack pop (used by iret and retf).
10821 *
10822 * This will raise \#SS or \#PF if appropriate.
10823 *
10824 * @returns Strict VBox status code.
10825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10826 * @param cbMem The number of bytes to pop from the stack.
10827 * @param ppvMem Where to return the pointer to the stack memory.
10828 * @param puNewRsp Where to return the new RSP value. This must be
10829 * assigned to CPUMCTX::rsp manually some time
10830 * after iemMemStackPopDoneSpecial() has been
10831 * called.
10832 */
10833IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10834{
10835 Assert(cbMem < UINT8_MAX);
10836 RTUINT64U NewRsp;
10837 NewRsp.u = *puNewRsp;
10838 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10839 *puNewRsp = NewRsp.u;
10840 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10841}
10842
10843
10844/**
10845 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10846 * iemMemStackPopContinueSpecial).
10847 *
10848 * The caller will manually commit the rSP.
10849 *
10850 * @returns Strict VBox status code.
10851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10852 * @param pvMem The pointer returned by
10853 * iemMemStackPopBeginSpecial() or
10854 * iemMemStackPopContinueSpecial().
10855 */
10856IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10857{
10858 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10859}
10860
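/*
 * Illustrative usage sketch (hypothetical caller, not compiled): the special
 * pop sequence maps the frame, lets the caller read it, releases it via
 * iemMemStackPopDoneSpecial(), and leaves the RSP commit entirely to the
 * caller, e.g. only after all the checks of an iret/retf have passed.
 */
#if 0
static VBOXSTRICTRC iemExamplePeekReturnAddress(PVMCPU pVCpu, uint64_t *puRet)
{
    uint64_t const *pu64Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puRet = pu64Frame[0];

    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the manual commit the doc comments ask for */
    return rcStrict;
}
#endif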
10861
10862/**
10863 * Fetches a system table byte.
10864 *
10865 * @returns Strict VBox status code.
10866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10867 * @param pbDst Where to return the byte.
10868 * @param iSegReg The index of the segment register to use for
10869 * this access. The base and limits are checked.
10870 * @param GCPtrMem The address of the guest memory.
10871 */
10872IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10873{
10874 /* The lazy approach for now... */
10875 uint8_t const *pbSrc;
10876 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10877 if (rc == VINF_SUCCESS)
10878 {
10879 *pbDst = *pbSrc;
10880 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10881 }
10882 return rc;
10883}
10884
10885
10886/**
10887 * Fetches a system table word.
10888 *
10889 * @returns Strict VBox status code.
10890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10891 * @param pu16Dst Where to return the word.
10892 * @param iSegReg The index of the segment register to use for
10893 * this access. The base and limits are checked.
10894 * @param GCPtrMem The address of the guest memory.
10895 */
10896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10897{
10898 /* The lazy approach for now... */
10899 uint16_t const *pu16Src;
10900 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10901 if (rc == VINF_SUCCESS)
10902 {
10903 *pu16Dst = *pu16Src;
10904 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10905 }
10906 return rc;
10907}
10908
10909
10910/**
10911 * Fetches a system table dword.
10912 *
10913 * @returns Strict VBox status code.
10914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10915 * @param pu32Dst Where to return the dword.
10916 * @param iSegReg The index of the segment register to use for
10917 * this access. The base and limits are checked.
10918 * @param GCPtrMem The address of the guest memory.
10919 */
10920IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10921{
10922 /* The lazy approach for now... */
10923 uint32_t const *pu32Src;
10924 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10925 if (rc == VINF_SUCCESS)
10926 {
10927 *pu32Dst = *pu32Src;
10928 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10929 }
10930 return rc;
10931}
10932
10933
10934/**
10935 * Fetches a system table qword.
10936 *
10937 * @returns Strict VBox status code.
10938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10939 * @param pu64Dst Where to return the qword.
10940 * @param iSegReg The index of the segment register to use for
10941 * this access. The base and limits are checked.
10942 * @param GCPtrMem The address of the guest memory.
10943 */
10944IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10945{
10946 /* The lazy approach for now... */
10947 uint64_t const *pu64Src;
10948 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10949 if (rc == VINF_SUCCESS)
10950 {
10951 *pu64Dst = *pu64Src;
10952 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10953 }
10954 return rc;
10955}
10956
10957
10958/**
10959 * Fetches a descriptor table entry with caller specified error code.
10960 *
10961 * @returns Strict VBox status code.
10962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10963 * @param pDesc Where to return the descriptor table entry.
10964 * @param uSel The selector which table entry to fetch.
10965 * @param uXcpt The exception to raise on table lookup error.
10966 * @param uErrorCode The error code associated with the exception.
10967 */
10968IEM_STATIC VBOXSTRICTRC
10969iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10970{
10971 AssertPtr(pDesc);
10972 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10973
10974 /** @todo did the 286 require all 8 bytes to be accessible? */
10975 /*
10976 * Get the selector table base and check bounds.
10977 */
10978 RTGCPTR GCPtrBase;
10979 if (uSel & X86_SEL_LDT)
10980 {
10981 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10982 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10983 {
10984 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10985 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10986 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10987 uErrorCode, 0);
10988 }
10989
10990 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10991 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10992 }
10993 else
10994 {
10995 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10996 {
10997 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10998 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10999 uErrorCode, 0);
11000 }
11001 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11002 }
11003
11004 /*
11005 * Read the legacy descriptor and maybe the long mode extensions if
11006 * required.
11007 */
11008 VBOXSTRICTRC rcStrict;
11009 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11010 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11011 else
11012 {
11013 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11014 if (rcStrict == VINF_SUCCESS)
11015 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11016 if (rcStrict == VINF_SUCCESS)
11017 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11018 if (rcStrict == VINF_SUCCESS)
11019 pDesc->Legacy.au16[3] = 0;
11020 else
11021 return rcStrict;
11022 }
11023
11024 if (rcStrict == VINF_SUCCESS)
11025 {
11026 if ( !IEM_IS_LONG_MODE(pVCpu)
11027 || pDesc->Legacy.Gen.u1DescType)
11028 pDesc->Long.au64[1] = 0;
11029 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11030 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11031 else
11032 {
11033 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11034 /** @todo is this the right exception? */
11035 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11036 }
11037 }
11038 return rcStrict;
11039}
11040
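/*
 * Note / sketch (not compiled): a selector is index<<3 | TI | RPL, so the
 * entry address used above is simply the LDT or GDT base (picked by the TI
 * bit) plus the selector with TI and RPL masked off.  The helper name below
 * is hypothetical; the real bounds and present checks are omitted.
 */
#if 0
static RTGCPTR iemExampleCalcDescAddr(PVMCPU pVCpu, uint16_t uSel)
{
    RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT)
                      ? pVCpu->cpum.GstCtx.ldtr.u64Base
                      : pVCpu->cpum.GstCtx.gdtr.pGdt;
    return GCPtrBase + (uSel & X86_SEL_MASK);   /* X86_SEL_MASK strips the TI and RPL bits. */
}
#endif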
11041
11042/**
11043 * Fetches a descriptor table entry.
11044 *
11045 * @returns Strict VBox status code.
11046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11047 * @param pDesc Where to return the descriptor table entry.
11048 * @param uSel The selector which table entry to fetch.
11049 * @param uXcpt The exception to raise on table lookup error.
11050 */
11051IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11052{
11053 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11054}
11055
11056
11057/**
11058 * Fakes a long mode stack selector for SS = 0.
11059 *
11060 * @param pDescSs Where to return the fake stack descriptor.
11061 * @param uDpl The DPL we want.
11062 */
11063IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11064{
11065 pDescSs->Long.au64[0] = 0;
11066 pDescSs->Long.au64[1] = 0;
11067 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11068 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11069 pDescSs->Long.Gen.u2Dpl = uDpl;
11070 pDescSs->Long.Gen.u1Present = 1;
11071 pDescSs->Long.Gen.u1Long = 1;
11072}
11073
11074
11075/**
11076 * Marks the selector descriptor as accessed (only non-system descriptors).
11077 *
11078 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11079 * will therefore skip the limit checks.
11080 *
11081 * @returns Strict VBox status code.
11082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11083 * @param uSel The selector.
11084 */
11085IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11086{
11087 /*
11088 * Get the selector table base and calculate the entry address.
11089 */
11090 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11091 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11092 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11093 GCPtr += uSel & X86_SEL_MASK;
11094
11095 /*
11096 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11097 * ugly stuff to avoid this. This will make sure it's an atomic access
11098 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11099 */
11100 VBOXSTRICTRC rcStrict;
11101 uint32_t volatile *pu32;
11102 if ((GCPtr & 3) == 0)
11103 {
11104 /* The normal case, map the 32-bit dword around the accessed bit (40). */
11105 GCPtr += 2 + 2;
11106 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11107 if (rcStrict != VINF_SUCCESS)
11108 return rcStrict;
11109 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11110 }
11111 else
11112 {
11113 /* The misaligned GDT/LDT case, map the whole thing. */
11114 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11115 if (rcStrict != VINF_SUCCESS)
11116 return rcStrict;
11117 switch ((uintptr_t)pu32 & 3)
11118 {
11119 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11120 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11121 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11122 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11123 }
11124 }
11125
11126 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11127}
11128
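/*
 * Arithmetic sketch (standard C, not compiled): the accessed flag is bit 40
 * of the 8-byte descriptor, i.e. bit 0 of the type byte at offset 5.  When
 * the aligned path above maps only bytes 4..7, that same flag becomes bit 8
 * of the mapped dword, which is what ASMAtomicBitSet(pu32, 8) relies on.
 */
#if 0
#include <assert.h>

static void iemExampleAccessedBitMath(void)
{
    unsigned const iBitInDesc  = 40;                /* accessed bit within the 64-bit descriptor */
    unsigned const offMapped   = 4;                 /* the aligned path maps bytes 4..7          */
    unsigned const iBitInDword = iBitInDesc - offMapped * 8;
    assert(iBitInDword == 8);                       /* matches ASMAtomicBitSet(pu32, 8) above    */
}
#endif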
11129/** @} */
11130
11131
11132/*
11133 * Include the C/C++ implementation of instruction.
11134 */
11135#include "IEMAllCImpl.cpp.h"
11136
11137
11138
11139/** @name "Microcode" macros.
11140 *
11141 * The idea is that we should be able to use the same code to interpret
11142 * instructions as well as recompiler instructions. Thus this obfuscation.
11143 *
11144 * @{
11145 */
11146#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11147#define IEM_MC_END() }
11148#define IEM_MC_PAUSE() do {} while (0)
11149#define IEM_MC_CONTINUE() do {} while (0)
11150
11151/** Internal macro. */
11152#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11153 do \
11154 { \
11155 VBOXSTRICTRC rcStrict2 = a_Expr; \
11156 if (rcStrict2 != VINF_SUCCESS) \
11157 return rcStrict2; \
11158 } while (0)
11159
11160
11161#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11162#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11163#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11164#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11165#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11166#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11167#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11168#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11169#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11170 do { \
11171 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11172 return iemRaiseDeviceNotAvailable(pVCpu); \
11173 } while (0)
11174#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11175 do { \
11176 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11177 return iemRaiseDeviceNotAvailable(pVCpu); \
11178 } while (0)
11179#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11180 do { \
11181 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11182 return iemRaiseMathFault(pVCpu); \
11183 } while (0)
11184#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11185 do { \
11186 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11187 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11188 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11189 return iemRaiseUndefinedOpcode(pVCpu); \
11190 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11191 return iemRaiseDeviceNotAvailable(pVCpu); \
11192 } while (0)
11193#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11194 do { \
11195 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11196 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11197 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11198 return iemRaiseUndefinedOpcode(pVCpu); \
11199 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11200 return iemRaiseDeviceNotAvailable(pVCpu); \
11201 } while (0)
11202#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11203 do { \
11204 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11205 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11206 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11207 return iemRaiseUndefinedOpcode(pVCpu); \
11208 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11209 return iemRaiseDeviceNotAvailable(pVCpu); \
11210 } while (0)
11211#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11212 do { \
11213 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11214 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11215 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11216 return iemRaiseUndefinedOpcode(pVCpu); \
11217 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11218 return iemRaiseDeviceNotAvailable(pVCpu); \
11219 } while (0)
11220#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11221 do { \
11222 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11223 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11224 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11225 return iemRaiseUndefinedOpcode(pVCpu); \
11226 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11227 return iemRaiseDeviceNotAvailable(pVCpu); \
11228 } while (0)
11229#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11230 do { \
11231 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11232 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11233 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11234 return iemRaiseUndefinedOpcode(pVCpu); \
11235 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11236 return iemRaiseDeviceNotAvailable(pVCpu); \
11237 } while (0)
11238#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11239 do { \
11240 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11241 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11242 return iemRaiseUndefinedOpcode(pVCpu); \
11243 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11244 return iemRaiseDeviceNotAvailable(pVCpu); \
11245 } while (0)
11246#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11247 do { \
11248 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11249 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11250 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11251 return iemRaiseUndefinedOpcode(pVCpu); \
11252 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11253 return iemRaiseDeviceNotAvailable(pVCpu); \
11254 } while (0)
11255#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11256 do { \
11257 if (pVCpu->iem.s.uCpl != 0) \
11258 return iemRaiseGeneralProtectionFault0(pVCpu); \
11259 } while (0)
11260#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11261 do { \
11262 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11263 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11264 } while (0)
11265#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11266 do { \
11267 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11268 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11269 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11270 return iemRaiseUndefinedOpcode(pVCpu); \
11271 } while (0)
11272#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11273 do { \
11274 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11275 return iemRaiseGeneralProtectionFault0(pVCpu); \
11276 } while (0)
11277
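/*
 * Ordering sketch (hypothetical instruction body, not compiled): the
 * IEM_MC_MAYBE_RAISE_* checks are meant to sit at the top of an IEM_MC block
 * so that the CR0/CR4/CPUID gating faults are delivered before any guest
 * state is fetched or modified.
 */
#if 0
IEM_MC_BEGIN(0, 0);
IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();     /* #UD / #NM gating first ...                   */
/* ... only then touch XMM registers, memory and the like ... */
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif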
11278
11279#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11280#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11281#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11282#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11283#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11284#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11285#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11286 uint32_t a_Name; \
11287 uint32_t *a_pName = &a_Name
11288#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11289 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11290
11291#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11292#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11293
11294#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11295#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11296#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11297#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11298#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11311#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11312 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11313 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11314 } while (0)
11315#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11316 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11317 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11318 } while (0)
11319#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11320 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11321 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11322 } while (0)
11323/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11324#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11325 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11326 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11327 } while (0)
11328#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11329 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11330 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11331 } while (0)
11332/** @note Not for IOPL or IF testing or modification. */
11333#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11334#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11335#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11336#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11337
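/*
 * Expansion sketch (approximate, not compiled): a tiny IEM_MC block and what
 * the macros above turn it into in the interpreter build.  The register
 * index 0 stands for AX here and is purely illustrative.
 */
#if 0
/* What a decoder function writes: */
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_FETCH_GREG_U16(u16Value, 0);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();

/* Roughly what that expands to: */
{
    uint16_t u16Value;
    (u16Value) = iemGRegFetchU16(pVCpu, (0));
    iemRegUpdateRipAndClearRF(pVCpu);
}
#endif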
11338#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11339#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11340#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11341#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11342#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11343#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11344#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11345#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11346#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11347#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11348/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11349#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11350 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11351 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11352 } while (0)
11353#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11354 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11355 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11356 } while (0)
11357#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11358 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11359
11360
11361#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11362#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11363/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11364 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11365#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11366#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11367/** @note Not for IOPL or IF testing or modification. */
11368#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11369
11370#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11371#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11372#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11373 do { \
11374 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11375 *pu32Reg += (a_u32Value); \
11376 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11377 } while (0)
11378#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11379
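/*
 * Background sketch (standard C, little-endian host assumed as elsewhere in
 * this file, not compiled): the U32 variants clear pu32Reg[1] because a
 * 32-bit GPR write in 64-bit mode architecturally zero-extends into bits
 * 63:32; writing through the 32-bit reference alone would leave them intact.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void iemExampleZeroExtendOn32BitWrite(void)
{
    uint64_t  uReg    = UINT64_C(0xdeadbeef00000000) | UINT32_C(0x11223344);
    uint32_t *pu32Reg = (uint32_t *)&uReg;   /* low dword on a little-endian host */
    *pu32Reg  += 1;                          /* the 32-bit operation ...          */
    pu32Reg[1] = 0;                          /* ... plus the explicit clearing    */
    assert(uReg == UINT64_C(0x0000000011223345));
}
#endif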
11380#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11381#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11382#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11383 do { \
11384 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11385 *pu32Reg -= (a_u32Value); \
11386 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11387 } while (0)
11388#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11389#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11390
11391#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11392#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11393#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11394#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11395#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11396#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11397#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11398
11399#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11400#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11401#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11402#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11403
11404#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11405#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11406#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11407
11408#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11409#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11410#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11411
11412#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11413#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11414#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11415
11416#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11417#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11418#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11419
11420#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11421
11422#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11423
11424#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11425#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11426#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11427 do { \
11428 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11429 *pu32Reg &= (a_u32Value); \
11430 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11431 } while (0)
11432#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11433
11434#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11435#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11436#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11437 do { \
11438 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11439 *pu32Reg |= (a_u32Value); \
11440 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11441 } while (0)
11442#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11443
11444
11445/** @note Not for IOPL or IF modification. */
11446#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11447/** @note Not for IOPL or IF modification. */
11448#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11449/** @note Not for IOPL or IF modification. */
11450#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11451
11452#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11453
11454/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11455#define IEM_MC_FPU_TO_MMX_MODE() do { \
11456 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11457 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11458 } while (0)
11459
11460/** Switches the FPU state from MMX mode (FTW=0xffff). */
11461#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11462 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11463 } while (0)
11464
11465#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11466 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11467#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11468 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11469#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11470 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11471 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11472 } while (0)
11473#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11474 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11475 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11476 } while (0)
11477#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11478 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11479#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11480 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11481#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11482 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11483
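/*
 * Usage sketch (hypothetical, not compiled): the MMX registers alias the
 * mantissa of the x87 registers, which is why the store macros above also
 * set au32[2] (the exponent word) to 0xffff the way real CPUs do on an MMX
 * write.  Register indexes 0 and 1 below are arbitrary.
 */
#if 0
uint64_t u64Tmp;
IEM_MC_FETCH_MREG_U64(u64Tmp, 0);   /* read mm0 (mantissa of FPR0)                   */
IEM_MC_STORE_MREG_U64(1, u64Tmp);   /* write mm1 and tag its exponent word as 0xffff */
#endif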
11484#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11485 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11486 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11487 } while (0)
11488#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11489 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11490#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11491 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11492#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11493 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11494#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11495 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11496 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11497 } while (0)
11498#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11499 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11500#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11501 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11502 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11503 } while (0)
11504#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11505 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11506#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11507 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11508 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11509 } while (0)
11510#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11511 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11512#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11513 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11514#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11515 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11516#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11517 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11518#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11519 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11520 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11521 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11522 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11523 } while (0)
11524
11525#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11526 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11527 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11528 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11529 } while (0)
11530#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11531 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11532 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11533 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11534 } while (0)
11535#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11536 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11537 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11538 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11539 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11540 } while (0)
11541#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11542 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11543 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11544 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11545 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11546 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11547 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11548 } while (0)
11549
11550#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11551#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11552 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11553 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11555 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11559 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11560 } while (0)
11561#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11562 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11563 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11568 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11569 } while (0)
11570#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11571 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11572 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11577 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11578 } while (0)
11579#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11580 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11581 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11586 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11587 } while (0)
11588
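/*
 * Usage sketch (hypothetical, not compiled): the _ZX_VLMAX stores implement
 * the VEX rule that a VEX-encoded write to an XMM register zeroes the rest
 * of the register up to the maximum supported vector length, hence the
 * YmmHi clearing and the IEM_MC_INT_CLEAR_ZMM_256_UP placeholder above.
 */
#if 0
RTUINT128U uSrc;
IEM_MC_FETCH_YREG_U128(uSrc, 3);            /* low 128 bits of ymm3               */
IEM_MC_STORE_YREG_U128_ZX_VLMAX(2, uSrc);   /* xmm2 = uSrc, ymm2[255:128] cleared */
#endif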
11589#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11590 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11591#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11592 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11593#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11594 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11595#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11596 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11597 uintptr_t const iYRegTmp = (a_iYReg); \
11598 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11600 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11601 } while (0)
11602
11603#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11604 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11605 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11606 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11607 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11609 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11611 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11612 } while (0)
11613#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11614 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11615 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11616 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11622 } while (0)
11623#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11624 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11625 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11626 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11631 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11632 } while (0)
11633
11634#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11635 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11636 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11637 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11638 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11641 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11643 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11644 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11645 } while (0)
11646#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11647 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11648 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11649 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11650 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11651 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11653 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11655 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11656 } while (0)
11657#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11658 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11659 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11660 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11661 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11662 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11664 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11666 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11667 } while (0)
11668#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11669 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11670 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11671 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11672 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11673 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11674 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11675 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11676 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11677 } while (0)
11678
11679#ifndef IEM_WITH_SETJMP
11680# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11684# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11686#else
11687# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11690 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11691# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11692 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11693#endif
11694
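/*
 * Expansion sketch (approximate, not compiled): the memory-access macros in
 * this area come in two flavours.  Without IEM_WITH_SETJMP they call a
 * status-returning fetch and bail out via IEM_MC_RETURN_ON_FAILURE; with it
 * they call the *Jmp variant, which reports failure by longjmp-ing out of
 * the instruction, so the expansion is a plain assignment.  The locals
 * u8Value and GCPtrEffSrc are hypothetical.
 */
#if 0
/* Decoder writes: IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEffSrc); */

/* Without IEM_WITH_SETJMP this roughly becomes: */
do
{
    VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Value, X86_SREG_DS, GCPtrEffSrc);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
} while (0);

/* With IEM_WITH_SETJMP it is just: */
u8Value = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrEffSrc);
#endif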
11695#ifndef IEM_WITH_SETJMP
11696# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11700# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11702#else
11703# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11706 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11707# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11708 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11709#endif
11710
11711#ifndef IEM_WITH_SETJMP
11712# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11716# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11718#else
11719# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11722 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11723# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11724 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11725#endif
11726
11727#ifdef SOME_UNUSED_FUNCTION
11728# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11730#endif
11731
11732#ifndef IEM_WITH_SETJMP
11733# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11735# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11737# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11739# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11741#else
11742# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11743 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11745 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11746# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750#endif
11751
11752#ifndef IEM_WITH_SETJMP
11753# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11759#else
11760# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11763 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11765 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11766#endif
11767
11768#ifndef IEM_WITH_SETJMP
11769# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11771# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11773#else
11774# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11775 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11776# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11777 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11778#endif
11779
11780#ifndef IEM_WITH_SETJMP
11781# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11785#else
11786# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11787 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11788# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11789 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11790#endif
11791
11792
11793
11794#ifndef IEM_WITH_SETJMP
11795# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11796 do { \
11797 uint8_t u8Tmp; \
11798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11799 (a_u16Dst) = u8Tmp; \
11800 } while (0)
11801# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11802 do { \
11803 uint8_t u8Tmp; \
11804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11805 (a_u32Dst) = u8Tmp; \
11806 } while (0)
11807# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11808 do { \
11809 uint8_t u8Tmp; \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11811 (a_u64Dst) = u8Tmp; \
11812 } while (0)
11813# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11814 do { \
11815 uint16_t u16Tmp; \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11817 (a_u32Dst) = u16Tmp; \
11818 } while (0)
11819# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11820 do { \
11821 uint16_t u16Tmp; \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11823 (a_u64Dst) = u16Tmp; \
11824 } while (0)
11825# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11826 do { \
11827 uint32_t u32Tmp; \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11829 (a_u64Dst) = u32Tmp; \
11830 } while (0)
11831#else /* IEM_WITH_SETJMP */
11832# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11833 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11834# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11835 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11836# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11837 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11838# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11839 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11840# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11841 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11842# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11843 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11844#endif /* IEM_WITH_SETJMP */
11845
11846#ifndef IEM_WITH_SETJMP
11847# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11848 do { \
11849 uint8_t u8Tmp; \
11850 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11851 (a_u16Dst) = (int8_t)u8Tmp; \
11852 } while (0)
11853# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11854 do { \
11855 uint8_t u8Tmp; \
11856 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11857 (a_u32Dst) = (int8_t)u8Tmp; \
11858 } while (0)
11859# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11860 do { \
11861 uint8_t u8Tmp; \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11863 (a_u64Dst) = (int8_t)u8Tmp; \
11864 } while (0)
11865# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11866 do { \
11867 uint16_t u16Tmp; \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11869 (a_u32Dst) = (int16_t)u16Tmp; \
11870 } while (0)
11871# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11872 do { \
11873 uint16_t u16Tmp; \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11875 (a_u64Dst) = (int16_t)u16Tmp; \
11876 } while (0)
11877# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11878 do { \
11879 uint32_t u32Tmp; \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11881 (a_u64Dst) = (int32_t)u32Tmp; \
11882 } while (0)
11883#else /* IEM_WITH_SETJMP */
11884# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11885 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11886# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11887 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11888# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11889 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11890# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11891 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11892# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11893 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11894# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11895 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11896#endif /* IEM_WITH_SETJMP */
11897
11898#ifndef IEM_WITH_SETJMP
11899# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11901# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11903# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11905# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11907#else
11908# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11909 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11910# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11911 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11912# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11913 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11914# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11915 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11916#endif
11917
11918#ifndef IEM_WITH_SETJMP
11919# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11920 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11921# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11923# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11925# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11926 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11927#else
11928# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11929 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11930# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11931 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11932# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11933 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11934# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11935 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11936#endif
11937
11938#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11939#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11940#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11941#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11942#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11943#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11944#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11945 do { \
11946 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11947 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11948 } while (0)
11949
11950#ifndef IEM_WITH_SETJMP
11951# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11952 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11953# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11954 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11955#else
11956# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11957 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11958# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11959 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11960#endif
11961
11962#ifndef IEM_WITH_SETJMP
11963# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11965# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11967#else
11968# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11969 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11970# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11971 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11972#endif
11973
11974
11975#define IEM_MC_PUSH_U16(a_u16Value) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11977#define IEM_MC_PUSH_U32(a_u32Value) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11979#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11981#define IEM_MC_PUSH_U64(a_u64Value) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11983
11984#define IEM_MC_POP_U16(a_pu16Value) \
11985 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11986#define IEM_MC_POP_U32(a_pu32Value) \
11987 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11988#define IEM_MC_POP_U64(a_pu64Value) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
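/* Illustrative sketch (not taken from the opcode tables): a "push ax" style
 * body would typically use the stack helpers above like this; IEM_MC_BEGIN,
 * IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16, IEM_MC_ADVANCE_RIP and IEM_MC_END are
 * assumed to be the usual IEM_MC helpers defined elsewhere in this file:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */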
11990
11991/** Maps guest memory for direct or bounce buffered access.
11992 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11993 * @remarks May return.
11994 */
11995#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11996 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11997
11998/** Maps guest memory for direct or bounce buffered access.
11999 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12000 * @remarks May return.
12001 */
12002#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12003 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12004
12005/** Commits the memory and unmaps the guest memory.
12006 * @remarks May return.
12007 */
12008#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12009 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
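/* Illustrative sketch (hypothetical r/m16 read-modify-write tail): the operand
 * is mapped, handed to an assembly worker and then committed.  IEM_MC_ARG,
 * IEM_MC_ARG_LOCAL_EFLAGS, IEM_ACCESS_DATA_RW and the iemAImpl_add_u16 worker
 * are assumptions taken from the usual IEM opcode-body pattern:
 *
 *     IEM_MC_ARG(uint16_t *,   pu16Dst,         0);
 *     IEM_MC_ARG(uint16_t,     u16Src,          1);
 *     IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     ...
 */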
12010
12011/** Commits the memory and unmaps the guest memory, unless the FPU status word
12012 * (@a a_u16FSW) together with the FPU control word indicates a pending, unmasked
12013 * exception that would cause FLD not to store its result.
12014 *
12015 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12016 * store, while \#P will not.
12017 *
12018 * @remarks May in theory return - for now.
12019 */
12020#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12021 do { \
12022 if ( !(a_u16FSW & X86_FSW_ES) \
12023 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12024 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12025 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12026 } while (0)
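/* Worked example for the condition above (a sketch, not exhaustive): with
 * FCW.IM set (i.e. #IA masked; X86_FCW_MASK_ALL covers the IM..PM bits), an
 * FSW of X86_FSW_ES | X86_FSW_IE still commits, because the IE bit is stripped
 * by the ~(FCW & mask) term.  With FCW.IM clear, the same FSW leaves IE set
 * after masking, so the commit is skipped, matching the FLD behaviour
 * described above. */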
12027
12028/** Calculate efficient address from R/M. */
12029#ifndef IEM_WITH_SETJMP
12030# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12031 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12032#else
12033# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12034 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12035#endif
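/* Illustrative sketch (hypothetical memory-form decode): the effective address
 * is computed once and then used with the fetch/store helpers above, honouring
 * the segment prefix recorded in iEffSeg.  IEM_MC_BEGIN, IEM_MC_LOCAL,
 * IEM_MC_FETCH_MEM_U16, IEM_MC_ADVANCE_RIP and IEM_MC_END are assumed helpers
 * defined elsewhere in this file:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0 /*cbImm*/);
 *     IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     ...
 *     IEM_MC_END();
 */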
12036
12037#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12038#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12039#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12040#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12041#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12042#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12043#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12044
12045/**
12046 * Defers the rest of the instruction emulation to a C implementation routine
12047 * and returns, only taking the standard parameters.
12048 *
12049 * @param a_pfnCImpl The pointer to the C routine.
12050 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12051 */
12052#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12053
12054/**
12055 * Defers the rest of instruction emulation to a C implementation routine and
12056 * returns, taking one argument in addition to the standard ones.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @param a0 The argument.
12060 */
12061#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12062
12063/**
12064 * Defers the rest of the instruction emulation to a C implementation routine
12065 * and returns, taking two arguments in addition to the standard ones.
12066 *
12067 * @param a_pfnCImpl The pointer to the C routine.
12068 * @param a0 The first extra argument.
12069 * @param a1 The second extra argument.
12070 */
12071#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12072
12073/**
12074 * Defers the rest of the instruction emulation to a C implementation routine
12075 * and returns, taking three arguments in addition to the standard ones.
12076 *
12077 * @param a_pfnCImpl The pointer to the C routine.
12078 * @param a0 The first extra argument.
12079 * @param a1 The second extra argument.
12080 * @param a2 The third extra argument.
12081 */
12082#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12083
12084/**
12085 * Defers the rest of the instruction emulation to a C implementation routine
12086 * and returns, taking four arguments in addition to the standard ones.
12087 *
12088 * @param a_pfnCImpl The pointer to the C routine.
12089 * @param a0 The first extra argument.
12090 * @param a1 The second extra argument.
12091 * @param a2 The third extra argument.
12092 * @param a3 The fourth extra argument.
12093 */
12094#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12095
12096/**
12097 * Defers the rest of the instruction emulation to a C implementation routine
12098 * and returns, taking five arguments in addition to the standard ones.
12099 *
12100 * @param a_pfnCImpl The pointer to the C routine.
12101 * @param a0 The first extra argument.
12102 * @param a1 The second extra argument.
12103 * @param a2 The third extra argument.
12104 * @param a3 The fourth extra argument.
12105 * @param a4 The fifth extra argument.
12106 */
12107#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
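/* Illustrative sketch (hypothetical): decoding finishes and the heavy lifting
 * is handed to a C worker via one of the IEM_MC_CALL_CIMPL_N macros, which
 * return from the MC block.  The iemCImpl_FarJmp worker and the values fed to
 * IEM_MC_ARG_CONST are assumptions for the example:
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG_CONST(uint16_t, uSel,         u16Sel,                    0);
 *     IEM_MC_ARG_CONST(uint64_t, offSeg,       u64Off,                    1);
 *     IEM_MC_ARG_CONST(IEMMODE,  enmEffOpSize, pVCpu->iem.s.enmEffOpSize, 2);
 *     IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, enmEffOpSize);
 *     IEM_MC_END();
 */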
12108
12109/**
12110 * Defers the entire instruction emulation to a C implementation routine and
12111 * returns, only taking the standard parameters.
12112 *
12113 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12114 *
12115 * @param a_pfnCImpl The pointer to the C routine.
12116 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12117 */
12118#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12119
12120/**
12121 * Defers the entire instruction emulation to a C implementation routine and
12122 * returns, taking one argument in addition to the standard ones.
12123 *
12124 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12125 *
12126 * @param a_pfnCImpl The pointer to the C routine.
12127 * @param a0 The argument.
12128 */
12129#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12130
12131/**
12132 * Defers the entire instruction emulation to a C implementation routine and
12133 * returns, taking two arguments in addition to the standard ones.
12134 *
12135 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12136 *
12137 * @param a_pfnCImpl The pointer to the C routine.
12138 * @param a0 The first extra argument.
12139 * @param a1 The second extra argument.
12140 */
12141#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12142
12143/**
12144 * Defers the entire instruction emulation to a C implementation routine and
12145 * returns, taking three arguments in addition to the standard ones.
12146 *
12147 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12148 *
12149 * @param a_pfnCImpl The pointer to the C routine.
12150 * @param a0 The first extra argument.
12151 * @param a1 The second extra argument.
12152 * @param a2 The third extra argument.
12153 */
12154#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
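/* Illustrative sketch (hypothetical): instructions that need no MC block at
 * all simply defer to a C worker straight from the decoder function; the
 * iemCImpl_SomeWorker name is purely an example:
 *
 *     FNIEMOP_DEF(iemOp_someInstr)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 *     }
 */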
12155
12156/**
12157 * Calls a FPU assembly implementation taking one visible argument.
12158 *
12159 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12160 * @param a0 The first extra argument.
12161 */
12162#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12163 do { \
12164 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12165 } while (0)
12166
12167/**
12168 * Calls a FPU assembly implementation taking two visible arguments.
12169 *
12170 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12171 * @param a0 The first extra argument.
12172 * @param a1 The second extra argument.
12173 */
12174#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12175 do { \
12176 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12177 } while (0)
12178
12179/**
12180 * Calls a FPU assembly implementation taking three visible arguments.
12181 *
12182 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12183 * @param a0 The first extra argument.
12184 * @param a1 The second extra argument.
12185 * @param a2 The third extra argument.
12186 */
12187#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12188 do { \
12189 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12190 } while (0)
12191
12192#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12193 do { \
12194 (a_FpuData).FSW = (a_FSW); \
12195 (a_FpuData).r80Result = *(a_pr80Value); \
12196 } while (0)
12197
12198/** Pushes FPU result onto the stack. */
12199#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12200 iemFpuPushResult(pVCpu, &a_FpuData)
12201/** Pushes FPU result onto the stack and sets the FPUDP. */
12202#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12203 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12204
12205/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12206#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12207 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12208
12209/** Stores FPU result in a stack register. */
12210#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12211 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12212/** Stores FPU result in a stack register and pops the stack. */
12213#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12214 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12215/** Stores FPU result in a stack register and sets the FPUDP. */
12216#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12217 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12218/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12219 * stack. */
12220#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12221 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
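/* Illustrative sketch (hypothetical "fld m80fp" tail): the value is fetched,
 * wrapped in an IEMFPURESULT local (type assumed from elsewhere in IEM) and
 * pushed, recording the memory operand so FPUDP/FPUDS get updated:
 *
 *     IEM_MC_FETCH_MEM_R80(r80Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_FPUREG_IS_EMPTY(7)
 *         IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, &r80Val);
 *         IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_ENDIF();
 */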
12222
12223/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12224#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12225 iemFpuUpdateOpcodeAndIp(pVCpu)
12226/** Free a stack register (for FFREE and FFREEP). */
12227#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12228 iemFpuStackFree(pVCpu, a_iStReg)
12229/** Increment the FPU stack pointer. */
12230#define IEM_MC_FPU_STACK_INC_TOP() \
12231 iemFpuStackIncTop(pVCpu)
12232/** Decrement the FPU stack pointer. */
12233#define IEM_MC_FPU_STACK_DEC_TOP() \
12234 iemFpuStackDecTop(pVCpu)
12235
12236/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12237#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12238 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12239/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12240#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12241 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12242/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12243#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12244 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12245/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12246#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12247 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12248/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12249 * stack. */
12250#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12251 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12252/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12253#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12254 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12255
12256/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12257#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12258 iemFpuStackUnderflow(pVCpu, a_iStDst)
12259/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12260 * stack. */
12261#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12262 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12263/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12264 * FPUDS. */
12265#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12266 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12267/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12268 * FPUDS. Pops stack. */
12269#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12270 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12271/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12272 * stack twice. */
12273#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12274 iemFpuStackUnderflowThenPopPop(pVCpu)
12275/** Raises a FPU stack underflow exception for an instruction pushing a result
12276 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12277#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12278 iemFpuStackPushUnderflow(pVCpu)
12279/** Raises a FPU stack underflow exception for an instruction pushing a result
12280 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12281#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12282 iemFpuStackPushUnderflowTwo(pVCpu)
12283
12284/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12285 * FPUIP, FPUCS and FOP. */
12286#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12287 iemFpuStackPushOverflow(pVCpu)
12288/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12289 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12290#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12291 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12292/** Prepares for using the FPU state.
12293 * Ensures that we can use the host FPU in the current context (RC+R0).
12294 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12295#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12296/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12297#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12298/** Actualizes the guest FPU state so it can be accessed and modified. */
12299#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12300
12301/** Prepares for using the SSE state.
12302 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12303 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12304#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12305/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12306#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12307/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12308#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12309
12310/** Prepares for using the AVX state.
12311 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12312 * Ensures the guest AVX state in the CPUMCTX is up to date.
12313 * @note This will include the AVX512 state too when support for it is added
12314 * due to the zero extending feature of VEX instructions. */
12315#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12316/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12317#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12318/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12319#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
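/* Illustrative sketch (hypothetical SSE op body): raise any pending SSE/FPU
 * exceptions first, then prepare the state before touching XMM registers or
 * calling a worker.  IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT and
 * IEM_MC_FETCH_XREG_U128 are assumed helpers from elsewhere in IEM:
 *
 *     IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_FETCH_XREG_U128(uSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *     ...
 */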
12320
12321/**
12322 * Calls a MMX assembly implementation taking two visible arguments.
12323 *
12324 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12325 * @param a0 The first extra argument.
12326 * @param a1 The second extra argument.
12327 */
12328#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12329 do { \
12330 IEM_MC_PREPARE_FPU_USAGE(); \
12331 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12332 } while (0)
12333
12334/**
12335 * Calls a MMX assembly implementation taking three visible arguments.
12336 *
12337 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12338 * @param a0 The first extra argument.
12339 * @param a1 The second extra argument.
12340 * @param a2 The third extra argument.
12341 */
12342#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12343 do { \
12344 IEM_MC_PREPARE_FPU_USAGE(); \
12345 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12346 } while (0)
12347
12348
12349/**
12350 * Calls a SSE assembly implementation taking two visible arguments.
12351 *
12352 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12353 * @param a0 The first extra argument.
12354 * @param a1 The second extra argument.
12355 */
12356#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12357 do { \
12358 IEM_MC_PREPARE_SSE_USAGE(); \
12359 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12360 } while (0)
12361
12362/**
12363 * Calls a SSE assembly implementation taking three visible arguments.
12364 *
12365 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12366 * @param a0 The first extra argument.
12367 * @param a1 The second extra argument.
12368 * @param a2 The third extra argument.
12369 */
12370#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12371 do { \
12372 IEM_MC_PREPARE_SSE_USAGE(); \
12373 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12374 } while (0)
12375
12376
12377/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12378 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12379#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12380 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12381
12382/**
12383 * Calls a AVX assembly implementation taking two visible arguments.
12384 *
12385 * There is one implicit zero'th argument, a pointer to the extended state.
12386 *
12387 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12388 * @param a1 The first extra argument.
12389 * @param a2 The second extra argument.
12390 */
12391#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12392 do { \
12393 IEM_MC_PREPARE_AVX_USAGE(); \
12394 a_pfnAImpl(pXState, (a1), (a2)); \
12395 } while (0)
12396
12397/**
12398 * Calls a AVX assembly implementation taking three visible arguments.
12399 *
12400 * There is one implicit zero'th argument, a pointer to the extended state.
12401 *
12402 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12403 * @param a1 The first extra argument.
12404 * @param a2 The second extra argument.
12405 * @param a3 The third extra argument.
12406 */
12407#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12408 do { \
12409 IEM_MC_PREPARE_AVX_USAGE(); \
12410 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12411 } while (0)
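/* Illustrative sketch (hypothetical VEX op body): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS()
 * declares the implicit pXState argument that IEM_MC_CALL_AVX_AIMPL_2/3 pass to
 * the worker; the iemAImpl_vexample_u128 worker name is an assumption:
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG(PRTUINT128U,  puDst, 1);
 *     IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
 *     ...
 *     IEM_MC_PREPARE_AVX_USAGE();
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vexample_u128, puDst, puSrc);
 */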
12412
12413/** @note Not for IOPL or IF testing. */
12414#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12417/** @note Not for IOPL or IF testing. */
12418#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12421/** @note Not for IOPL or IF testing. */
12422#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12423 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12424 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12427 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12428 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12429/** @note Not for IOPL or IF testing. */
12430#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12431 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12432 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12433 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12434/** @note Not for IOPL or IF testing. */
12435#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12436 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12437 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12438 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12439#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12440#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12441#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12442/** @note Not for IOPL or IF testing. */
12443#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12444 if ( pVCpu->cpum.GstCtx.cx != 0 \
12445 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12446/** @note Not for IOPL or IF testing. */
12447#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12448 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12449 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12450/** @note Not for IOPL or IF testing. */
12451#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12452 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12453 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12454/** @note Not for IOPL or IF testing. */
12455#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12456 if ( pVCpu->cpum.GstCtx.cx != 0 \
12457 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12458/** @note Not for IOPL or IF testing. */
12459#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12460 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12461 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12462/** @note Not for IOPL or IF testing. */
12463#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12464 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12465 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12466#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12467#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12468
12469#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12470 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12471#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12472 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12473#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12474 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12475#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12476 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12477#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12478 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12479#define IEM_MC_IF_FCW_IM() \
12480 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12481
12482#define IEM_MC_ELSE() } else {
12483#define IEM_MC_ENDIF() } do {} while (0)
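/* Illustrative sketch (hypothetical "jz rel8" body): the IEM_MC_IF_* macros
 * open a brace that IEM_MC_ELSE() continues and IEM_MC_ENDIF() closes, so they
 * must always be paired within one MC block.  IEM_MC_REL_JMP_S8 and
 * IEM_MC_ADVANCE_RIP are assumed helpers from elsewhere in IEM:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */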
12484
12485/** @} */
12486
12487
12488/** @name Opcode Debug Helpers.
12489 * @{
12490 */
12491#ifdef VBOX_WITH_STATISTICS
12492# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12493#else
12494# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12495#endif
12496
12497#ifdef DEBUG
12498# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12499 do { \
12500 IEMOP_INC_STATS(a_Stats); \
12501 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12502 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12503 } while (0)
12504
12505# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12506 do { \
12507 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12508 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12509 (void)RT_CONCAT(OP_,a_Upper); \
12510 (void)(a_fDisHints); \
12511 (void)(a_fIemHints); \
12512 } while (0)
12513
12514# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12515 do { \
12516 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12517 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12518 (void)RT_CONCAT(OP_,a_Upper); \
12519 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12520 (void)(a_fDisHints); \
12521 (void)(a_fIemHints); \
12522 } while (0)
12523
12524# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12525 do { \
12526 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12527 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12528 (void)RT_CONCAT(OP_,a_Upper); \
12529 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12530 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12531 (void)(a_fDisHints); \
12532 (void)(a_fIemHints); \
12533 } while (0)
12534
12535# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12536 do { \
12537 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12538 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12539 (void)RT_CONCAT(OP_,a_Upper); \
12540 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12543 (void)(a_fDisHints); \
12544 (void)(a_fIemHints); \
12545 } while (0)
12546
12547# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12548 do { \
12549 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12550 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12551 (void)RT_CONCAT(OP_,a_Upper); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12555 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12556 (void)(a_fDisHints); \
12557 (void)(a_fIemHints); \
12558 } while (0)
12559
12560#else
12561# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12562
12563# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12564 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12565# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12567# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12568 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12569# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12571# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12572 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12573
12574#endif
12575
12576#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12577 IEMOP_MNEMONIC0EX(a_Lower, \
12578 #a_Lower, \
12579 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12580#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12581 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12582 #a_Lower " " #a_Op1, \
12583 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12584#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12585 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12586 #a_Lower " " #a_Op1 "," #a_Op2, \
12587 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12588#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12589 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12590 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12591 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12592#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12593 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12594 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12595 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
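/* Illustrative sketch (hypothetical decoder entry): a two-operand instruction
 * typically announces itself with IEMOP_MNEMONIC2 at the top of its FNIEMOP
 * body, which bumps the per-instruction statistics counter and, in DEBUG
 * builds, Log4s the mnemonic; the exact form/operand tokens are examples only:
 *
 *     FNIEMOP_DEF(iemOp_add_Gv_Ev)
 *     {
 *         IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *         ...
 *     }
 */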
12596
12597/** @} */
12598
12599
12600/** @name Opcode Helpers.
12601 * @{
12602 */
12603
12604#ifdef IN_RING3
12605# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12606 do { \
12607 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12608 else \
12609 { \
12610 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12611 return IEMOP_RAISE_INVALID_OPCODE(); \
12612 } \
12613 } while (0)
12614#else
12615# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12616 do { \
12617 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12618 else return IEMOP_RAISE_INVALID_OPCODE(); \
12619 } while (0)
12620#endif
12621
12622/** The instruction requires a 186 or later. */
12623#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12624# define IEMOP_HLP_MIN_186() do { } while (0)
12625#else
12626# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12627#endif
12628
12629/** The instruction requires a 286 or later. */
12630#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12631# define IEMOP_HLP_MIN_286() do { } while (0)
12632#else
12633# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12634#endif
12635
12636/** The instruction requires a 386 or later. */
12637#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12638# define IEMOP_HLP_MIN_386() do { } while (0)
12639#else
12640# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12641#endif
12642
12643/** The instruction requires a 386 or later if the given expression is true. */
12644#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12645# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12646#else
12647# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12648#endif
12649
12650/** The instruction requires a 486 or later. */
12651#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12652# define IEMOP_HLP_MIN_486() do { } while (0)
12653#else
12654# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12655#endif
12656
12657/** The instruction requires a Pentium (586) or later. */
12658#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12659# define IEMOP_HLP_MIN_586() do { } while (0)
12660#else
12661# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12662#endif
12663
12664/** The instruction requires a PentiumPro (686) or later. */
12665#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12666# define IEMOP_HLP_MIN_686() do { } while (0)
12667#else
12668# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12669#endif
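/* Illustrative sketch (hypothetical): an instruction introduced with the 386
 * guards its decoder entry so that older target CPUs raise #UD instead:
 *
 *     FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
 *     {
 *         IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *         IEMOP_HLP_MIN_386();
 *         ...
 *     }
 */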
12670
12671
12672/** The instruction raises an \#UD in real and V8086 mode. */
12673#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12674 do \
12675 { \
12676 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12677 else return IEMOP_RAISE_INVALID_OPCODE(); \
12678 } while (0)
12679
12680#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12681/** This instruction raises an \#UD in real and V8086 mode, or, when in long
12682 * mode, if not using a 64-bit code segment (applicable to all VMX instructions
12683 * except VMCALL).
12684 */
12685#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12686 do \
12687 { \
12688 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12689 && ( !IEM_IS_LONG_MODE(pVCpu) \
12690 || IEM_IS_64BIT_CODE(pVCpu))) \
12691 { /* likely */ } \
12692 else \
12693 { \
12694 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12695 { \
12696 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12697 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12698 return IEMOP_RAISE_INVALID_OPCODE(); \
12699 } \
12700 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12701 { \
12702 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12703 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12704 return IEMOP_RAISE_INVALID_OPCODE(); \
12705 } \
12706 } \
12707 } while (0)
12708
12709/** The instruction can only be executed in VMX operation (VMX root mode and
12710 * non-root mode).
12711 *
12712 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12713 */
12714# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12715 do \
12716 { \
12717 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12718 else \
12719 { \
12720 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12721 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12722 return IEMOP_RAISE_INVALID_OPCODE(); \
12723 } \
12724 } while (0)
12725#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12726
12727/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12728 * 64-bit mode. */
12729#define IEMOP_HLP_NO_64BIT() \
12730 do \
12731 { \
12732 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12733 return IEMOP_RAISE_INVALID_OPCODE(); \
12734 } while (0)
12735
12736/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12737 * 64-bit mode. */
12738#define IEMOP_HLP_ONLY_64BIT() \
12739 do \
12740 { \
12741 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12742 return IEMOP_RAISE_INVALID_OPCODE(); \
12743 } while (0)
12744
12745/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12746#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12747 do \
12748 { \
12749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12750 iemRecalEffOpSize64Default(pVCpu); \
12751 } while (0)
12752
12753/** The instruction has 64-bit operand size if 64-bit mode. */
12754#define IEMOP_HLP_64BIT_OP_SIZE() \
12755 do \
12756 { \
12757 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12758 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12759 } while (0)
12760
12761/** Only a REX prefix immediately preceding the first opcode byte takes
12762 * effect. This macro helps ensure this, as well as logging bad guest code. */
12763#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12764 do \
12765 { \
12766 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12767 { \
12768 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12769 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12770 pVCpu->iem.s.uRexB = 0; \
12771 pVCpu->iem.s.uRexIndex = 0; \
12772 pVCpu->iem.s.uRexReg = 0; \
12773 iemRecalEffOpSize(pVCpu); \
12774 } \
12775 } while (0)
12776
12777/**
12778 * Done decoding.
12779 */
12780#define IEMOP_HLP_DONE_DECODING() \
12781 do \
12782 { \
12783 /*nothing for now, maybe later... */ \
12784 } while (0)
12785
12786/**
12787 * Done decoding, raise \#UD exception if lock prefix present.
12788 */
12789#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12790 do \
12791 { \
12792 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12793 { /* likely */ } \
12794 else \
12795 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12796 } while (0)
12797
12798
12799/**
12800 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12801 * repnz or size prefixes are present, or if in real or v8086 mode.
12802 */
12803#define IEMOP_HLP_DONE_VEX_DECODING() \
12804 do \
12805 { \
12806 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12807 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12808 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12809 { /* likely */ } \
12810 else \
12811 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12812 } while (0)
12813
12814/**
12815 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12816 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12817 */
12818#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12819 do \
12820 { \
12821 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12822 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12823 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12824 && pVCpu->iem.s.uVexLength == 0)) \
12825 { /* likely */ } \
12826 else \
12827 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12828 } while (0)
12829
12830
12831/**
12832 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12833 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12834 * register 0, or if in real or v8086 mode.
12835 */
12836#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12837 do \
12838 { \
12839 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12840 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12841 && !pVCpu->iem.s.uVex3rdReg \
12842 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12843 { /* likely */ } \
12844 else \
12845 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12846 } while (0)
12847
12848/**
12849 * Done decoding VEX, no V, L=0.
12850 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12851 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12852 */
12853#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12854 do \
12855 { \
12856 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12857 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12858 && pVCpu->iem.s.uVexLength == 0 \
12859 && pVCpu->iem.s.uVex3rdReg == 0 \
12860 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12861 { /* likely */ } \
12862 else \
12863 return IEMOP_RAISE_INVALID_OPCODE(); \
12864 } while (0)
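/* Illustrative sketch (hypothetical VEX opcode body): after reading ModR/M,
 * a register-form instruction that does not use VEX.VVVV validates the prefix
 * state with one of the helpers above before building its MC block:
 *
 *     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *     {
 *         IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV();
 *         ...
 *     }
 */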
12865
12866#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12867 do \
12868 { \
12869 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12870 { /* likely */ } \
12871 else \
12872 { \
12873 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12874 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12875 } \
12876 } while (0)
12877#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12878 do \
12879 { \
12880 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12881 { /* likely */ } \
12882 else \
12883 { \
12884 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12885 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12886 } \
12887 } while (0)
12888
12889/**
12890 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12891 * are present.
12892 */
12893#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12894 do \
12895 { \
12896 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12897 { /* likely */ } \
12898 else \
12899 return IEMOP_RAISE_INVALID_OPCODE(); \
12900 } while (0)
12901
12902/**
12903 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12904 * prefixes are present.
12905 */
12906#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12907 do \
12908 { \
12909 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12910 { /* likely */ } \
12911 else \
12912 return IEMOP_RAISE_INVALID_OPCODE(); \
12913 } while (0)
12914
12915
12916/**
12917 * Calculates the effective address of a ModR/M memory operand.
12918 *
12919 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12920 *
12921 * @return Strict VBox status code.
12922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12923 * @param bRm The ModRM byte.
12924 * @param cbImm The size of any immediate following the
12925 * effective address opcode bytes. Important for
12926 * RIP relative addressing.
12927 * @param pGCPtrEff Where to return the effective address.
12928 */
12929IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12930{
12931 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12932# define SET_SS_DEF() \
12933 do \
12934 { \
12935 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12936 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12937 } while (0)
12938
12939 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12940 {
12941/** @todo Check the effective address size crap! */
12942 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12943 {
12944 uint16_t u16EffAddr;
12945
12946 /* Handle the disp16 form with no registers first. */
12947 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12948 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12949 else
12950 {
12951 /* Get the displacement. */
12952 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12953 {
12954 case 0: u16EffAddr = 0; break;
12955 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12956 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12957 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12958 }
12959
12960 /* Add the base and index registers to the disp. */
12961 switch (bRm & X86_MODRM_RM_MASK)
12962 {
12963 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12964 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12965 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12966 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12967 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12968 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12969 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12970 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12971 }
12972 }
12973
12974 *pGCPtrEff = u16EffAddr;
12975 }
12976 else
12977 {
12978 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12979 uint32_t u32EffAddr;
12980
12981 /* Handle the disp32 form with no registers first. */
12982 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12983 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12984 else
12985 {
12986 /* Get the register (or SIB) value. */
12987 switch ((bRm & X86_MODRM_RM_MASK))
12988 {
12989 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12990 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12991 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12992 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12993 case 4: /* SIB */
12994 {
12995 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12996
12997 /* Get the index and scale it. */
12998 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12999 {
13000 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13001 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13002 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13003 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13004 case 4: u32EffAddr = 0; /*none */ break;
13005 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13006 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13007 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13009 }
13010 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13011
13012 /* add base */
13013 switch (bSib & X86_SIB_BASE_MASK)
13014 {
13015 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13016 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13017 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13018 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13019 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13020 case 5:
13021 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13022 {
13023 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13024 SET_SS_DEF();
13025 }
13026 else
13027 {
13028 uint32_t u32Disp;
13029 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13030 u32EffAddr += u32Disp;
13031 }
13032 break;
13033 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13034 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13035 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13036 }
13037 break;
13038 }
13039 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13040 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13041 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13043 }
13044
13045 /* Get and add the displacement. */
13046 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13047 {
13048 case 0:
13049 break;
13050 case 1:
13051 {
13052 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13053 u32EffAddr += i8Disp;
13054 break;
13055 }
13056 case 2:
13057 {
13058 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13059 u32EffAddr += u32Disp;
13060 break;
13061 }
13062 default:
13063 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13064 }
13065
13066 }
13067 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13068 *pGCPtrEff = u32EffAddr;
13069 else
13070 {
13071 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13072 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13073 }
13074 }
13075 }
13076 else
13077 {
13078 uint64_t u64EffAddr;
13079
13080 /* Handle the rip+disp32 form with no registers first. */
13081 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13082 {
13083 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13084 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13085 }
13086 else
13087 {
13088 /* Get the register (or SIB) value. */
13089 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13090 {
13091 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13092 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13093 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13094 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13095 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13096 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13097 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13098 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13099 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13100 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13101 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13102 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13103 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13104 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13105 /* SIB */
13106 case 4:
13107 case 12:
13108 {
13109 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13110
13111 /* Get the index and scale it. */
13112 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13113 {
13114 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13115 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13116 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13117 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13118 case 4: u64EffAddr = 0; /*none */ break;
13119 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13120 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13121 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13122 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13123 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13124 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13125 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13126 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13127 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13128 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13129 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13131 }
13132 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13133
13134 /* add base */
13135 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13136 {
13137 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13138 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13139 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13140 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13141 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13142 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13143 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13144 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13145 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13146 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13147 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13148 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13149 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13150 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13151 /* complicated encodings */
13152 case 5:
13153 case 13:
13154 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13155 {
13156 if (!pVCpu->iem.s.uRexB)
13157 {
13158 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13159 SET_SS_DEF();
13160 }
13161 else
13162 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13163 }
13164 else
13165 {
13166 uint32_t u32Disp;
13167 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13168 u64EffAddr += (int32_t)u32Disp;
13169 }
13170 break;
13171 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13172 }
13173 break;
13174 }
13175 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13176 }
13177
13178 /* Get and add the displacement. */
13179 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13180 {
13181 case 0:
13182 break;
13183 case 1:
13184 {
13185 int8_t i8Disp;
13186 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13187 u64EffAddr += i8Disp;
13188 break;
13189 }
13190 case 2:
13191 {
13192 uint32_t u32Disp;
13193 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13194 u64EffAddr += (int32_t)u32Disp;
13195 break;
13196 }
13197 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13198 }
13199
13200 }
13201
13202 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13203 *pGCPtrEff = u64EffAddr;
13204 else
13205 {
13206 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13207 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13208 }
13209 }
13210
13211 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13212 return VINF_SUCCESS;
13213}
13214
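/*
 * Illustrative sketch (not part of the IEM sources): the SIB arithmetic the
 * effective-address helpers around here implement boils down to
 * base + (index << scale) + displacement, truncated to the effective address
 * width.  The hypothetical helper below shows the 32-bit case and ignores
 * segment defaults, REX bits and the special ebp/disp32 encodings.
 */
#if 0 /* example only, intentionally compiled out */
static uint32_t exampleCalcSibEffAddr32(uint32_t uBase, uint32_t uIndex, uint8_t cShift, int32_t i32Disp)
{
    uint32_t uEffAddr = uIndex << (cShift & 3);     /* the scale field is a shift count 0..3 */
    uEffAddr += uBase;                              /* add the base register */
    uEffAddr += (uint32_t)i32Disp;                  /* add the sign-extended displacement */
    return uEffAddr;                                /* wraps naturally at 32 bits */
}
#endif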
13215
13216/**
13217 * Calculates the effective address of a ModR/M memory operand.
13218 *
13219 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13220 *
13221 * @return Strict VBox status code.
13222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13223 * @param bRm The ModRM byte.
13224 * @param cbImm The size of any immediate following the
13225 * effective address opcode bytes. Important for
13226 * RIP relative addressing.
13227 * @param pGCPtrEff Where to return the effective address.
13228 * @param offRsp RSP displacement.
13229 */
13230IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13231{
13232    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13233# define SET_SS_DEF() \
13234 do \
13235 { \
13236 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13237 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13238 } while (0)
13239
13240 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13241 {
13242/** @todo Check the effective address size crap! */
13243 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13244 {
13245 uint16_t u16EffAddr;
13246
13247 /* Handle the disp16 form with no registers first. */
13248 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13249 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13250 else
13251 {
13252                /* Get the displacement. */
13253 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13254 {
13255 case 0: u16EffAddr = 0; break;
13256 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13257 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13258 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13259 }
13260
13261 /* Add the base and index registers to the disp. */
13262 switch (bRm & X86_MODRM_RM_MASK)
13263 {
13264 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13265 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13266 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13267 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13268 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13269 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13270 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13271 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13272 }
13273 }
13274
13275 *pGCPtrEff = u16EffAddr;
13276 }
13277 else
13278 {
13279 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13280 uint32_t u32EffAddr;
13281
13282 /* Handle the disp32 form with no registers first. */
13283 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13284 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13285 else
13286 {
13287 /* Get the register (or SIB) value. */
13288 switch ((bRm & X86_MODRM_RM_MASK))
13289 {
13290 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13291 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13292 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13293 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13294 case 4: /* SIB */
13295 {
13296 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13297
13298 /* Get the index and scale it. */
13299 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13300 {
13301 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13302 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13303 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13304 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13305 case 4: u32EffAddr = 0; /*none */ break;
13306 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13307 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13308 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13309 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13310 }
13311 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13312
13313 /* add base */
13314 switch (bSib & X86_SIB_BASE_MASK)
13315 {
13316 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13317 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13318 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13319 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13320 case 4:
13321 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13322 SET_SS_DEF();
13323 break;
13324 case 5:
13325 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13326 {
13327 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13328 SET_SS_DEF();
13329 }
13330 else
13331 {
13332 uint32_t u32Disp;
13333 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13334 u32EffAddr += u32Disp;
13335 }
13336 break;
13337 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13338 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13340 }
13341 break;
13342 }
13343 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13344 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13345 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13346 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13347 }
13348
13349 /* Get and add the displacement. */
13350 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13351 {
13352 case 0:
13353 break;
13354 case 1:
13355 {
13356 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13357 u32EffAddr += i8Disp;
13358 break;
13359 }
13360 case 2:
13361 {
13362 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13363 u32EffAddr += u32Disp;
13364 break;
13365 }
13366 default:
13367 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13368 }
13369
13370 }
13371 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13372 *pGCPtrEff = u32EffAddr;
13373 else
13374 {
13375 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13376 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13377 }
13378 }
13379 }
13380 else
13381 {
13382 uint64_t u64EffAddr;
13383
13384 /* Handle the rip+disp32 form with no registers first. */
13385 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13386 {
13387 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13388 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13389 }
13390 else
13391 {
13392 /* Get the register (or SIB) value. */
13393 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13394 {
13395 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13396 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13397 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13398 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13399 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13400 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13401 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13402 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13403 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13404 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13405 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13406 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13407 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13408 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13409 /* SIB */
13410 case 4:
13411 case 12:
13412 {
13413 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13414
13415 /* Get the index and scale it. */
13416 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13417 {
13418 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13419 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13420 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13421 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13422 case 4: u64EffAddr = 0; /*none */ break;
13423 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13424 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13425 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13426 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13427 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13428 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13429 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13430 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13431 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13432 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13433 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13434 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13435 }
13436 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13437
13438 /* add base */
13439 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13440 {
13441 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13442 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13443 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13444 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13445 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13446 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13447 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13448 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13449 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13450 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13451 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13452 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13453 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13454 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13455 /* complicated encodings */
13456 case 5:
13457 case 13:
13458 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13459 {
13460 if (!pVCpu->iem.s.uRexB)
13461 {
13462 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13463 SET_SS_DEF();
13464 }
13465 else
13466 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13467 }
13468 else
13469 {
13470 uint32_t u32Disp;
13471 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13472 u64EffAddr += (int32_t)u32Disp;
13473 }
13474 break;
13475 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13476 }
13477 break;
13478 }
13479 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13480 }
13481
13482 /* Get and add the displacement. */
13483 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13484 {
13485 case 0:
13486 break;
13487 case 1:
13488 {
13489 int8_t i8Disp;
13490 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13491 u64EffAddr += i8Disp;
13492 break;
13493 }
13494 case 2:
13495 {
13496 uint32_t u32Disp;
13497 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13498 u64EffAddr += (int32_t)u32Disp;
13499 break;
13500 }
13501 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13502 }
13503
13504 }
13505
13506 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13507 *pGCPtrEff = u64EffAddr;
13508 else
13509 {
13510 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13511 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13512 }
13513 }
13514
13515    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13516 return VINF_SUCCESS;
13517}
13518
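/*
 * Note on the Ex variant above (illustrative, assumptions flagged): it only
 * differs from iemOpHlpCalcRmEffAddr by the offRsp parameter, which is added
 * whenever RSP/ESP is the base register.  Presumably this lets a caller that
 * has already moved the guest stack pointer have the memory operand computed
 * against the value it intends; the hypothetical helper shows the arithmetic.
 */
#if 0 /* example only, intentionally compiled out */
static uint64_t exampleRspBasedEffAddr(uint64_t uGuestRsp, int8_t offRsp, int32_t i32Disp)
{
    /* Mirrors the 'rsp + offRsp' base case in iemOpHlpCalcRmEffAddrEx above. */
    return uGuestRsp + (int64_t)offRsp + (int64_t)i32Disp;
}
#endif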
13519
13520#ifdef IEM_WITH_SETJMP
13521/**
13522 * Calculates the effective address of a ModR/M memory operand.
13523 *
13524 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13525 *
13526 * May longjmp on internal error.
13527 *
13528 * @return The effective address.
13529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13530 * @param bRm The ModRM byte.
13531 * @param cbImm The size of any immediate following the
13532 * effective address opcode bytes. Important for
13533 * RIP relative addressing.
13534 */
13535IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13536{
13537 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13538# define SET_SS_DEF() \
13539 do \
13540 { \
13541 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13542 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13543 } while (0)
13544
13545 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13546 {
13547/** @todo Check the effective address size crap! */
13548 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13549 {
13550 uint16_t u16EffAddr;
13551
13552 /* Handle the disp16 form with no registers first. */
13553 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13554 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13555 else
13556 {
13557                /* Get the displacement. */
13558 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13559 {
13560 case 0: u16EffAddr = 0; break;
13561 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13562 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13563 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13564 }
13565
13566 /* Add the base and index registers to the disp. */
13567 switch (bRm & X86_MODRM_RM_MASK)
13568 {
13569 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13570 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13571 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13572 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13573 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13574 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13575 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13576 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13577 }
13578 }
13579
13580 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13581 return u16EffAddr;
13582 }
13583
13584 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13585 uint32_t u32EffAddr;
13586
13587 /* Handle the disp32 form with no registers first. */
13588 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13589 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13590 else
13591 {
13592 /* Get the register (or SIB) value. */
13593 switch ((bRm & X86_MODRM_RM_MASK))
13594 {
13595 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13596 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13597 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13598 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13599 case 4: /* SIB */
13600 {
13601 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13602
13603 /* Get the index and scale it. */
13604 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13605 {
13606 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13607 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13608 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13609 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13610 case 4: u32EffAddr = 0; /*none */ break;
13611 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13612 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13613 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13615 }
13616 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13617
13618 /* add base */
13619 switch (bSib & X86_SIB_BASE_MASK)
13620 {
13621 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13622 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13623 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13624 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13625 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13626 case 5:
13627 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13628 {
13629 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13630 SET_SS_DEF();
13631 }
13632 else
13633 {
13634 uint32_t u32Disp;
13635 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13636 u32EffAddr += u32Disp;
13637 }
13638 break;
13639 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13640 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13641 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13642 }
13643 break;
13644 }
13645 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13646 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13647 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13648 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13649 }
13650
13651 /* Get and add the displacement. */
13652 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13653 {
13654 case 0:
13655 break;
13656 case 1:
13657 {
13658 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13659 u32EffAddr += i8Disp;
13660 break;
13661 }
13662 case 2:
13663 {
13664 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13665 u32EffAddr += u32Disp;
13666 break;
13667 }
13668 default:
13669 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13670 }
13671 }
13672
13673 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13674 {
13675 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13676 return u32EffAddr;
13677 }
13678 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13679 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13680 return u32EffAddr & UINT16_MAX;
13681 }
13682
13683 uint64_t u64EffAddr;
13684
13685 /* Handle the rip+disp32 form with no registers first. */
13686 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13687 {
13688 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13689 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13690 }
13691 else
13692 {
13693 /* Get the register (or SIB) value. */
13694 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13695 {
13696 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13697 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13698 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13699 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13700 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13701 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13702 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13703 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13704 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13705 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13706 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13707 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13708 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13709 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13710 /* SIB */
13711 case 4:
13712 case 12:
13713 {
13714 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13715
13716 /* Get the index and scale it. */
13717 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13718 {
13719 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13720 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13721 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13722 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13723 case 4: u64EffAddr = 0; /*none */ break;
13724 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13725 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13726 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13727 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13728 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13729 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13730 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13731 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13732 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13733 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13734 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13735 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13736 }
13737 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13738
13739 /* add base */
13740 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13741 {
13742 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13743 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13744 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13745 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13746 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13747 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13748 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13749 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13750 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13751 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13752 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13753 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13754 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13755 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13756 /* complicated encodings */
13757 case 5:
13758 case 13:
13759 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13760 {
13761 if (!pVCpu->iem.s.uRexB)
13762 {
13763 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13764 SET_SS_DEF();
13765 }
13766 else
13767 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13768 }
13769 else
13770 {
13771 uint32_t u32Disp;
13772 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13773 u64EffAddr += (int32_t)u32Disp;
13774 }
13775 break;
13776 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13777 }
13778 break;
13779 }
13780 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13781 }
13782
13783 /* Get and add the displacement. */
13784 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13785 {
13786 case 0:
13787 break;
13788 case 1:
13789 {
13790 int8_t i8Disp;
13791 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13792 u64EffAddr += i8Disp;
13793 break;
13794 }
13795 case 2:
13796 {
13797 uint32_t u32Disp;
13798 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13799 u64EffAddr += (int32_t)u32Disp;
13800 break;
13801 }
13802 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13803 }
13804
13805 }
13806
13807 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13808 {
13809 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13810 return u64EffAddr;
13811 }
13812 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13813 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13814 return u64EffAddr & UINT32_MAX;
13815}
13816#endif /* IEM_WITH_SETJMP */
13817
13818/** @} */
13819
13820
13821
13822/*
13823 * Include the instructions
13824 */
13825#include "IEMAllInstructions.cpp.h"
13826
13827
13828
13829#ifdef LOG_ENABLED
13830/**
13831 * Logs the current instruction.
13832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13833 * @param fSameCtx Set if we have the same context information as the VMM,
13834 * clear if we may have already executed an instruction in
13835 * our debug context. When clear, we assume IEMCPU holds
13836 * valid CPU mode info.
13837 *
13838 * The @a fSameCtx parameter is now misleading and obsolete.
13839 * @param pszFunction The IEM function doing the execution.
13840 */
13841IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13842{
13843# ifdef IN_RING3
13844 if (LogIs2Enabled())
13845 {
13846 char szInstr[256];
13847 uint32_t cbInstr = 0;
13848 if (fSameCtx)
13849 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13850 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13851 szInstr, sizeof(szInstr), &cbInstr);
13852 else
13853 {
13854 uint32_t fFlags = 0;
13855 switch (pVCpu->iem.s.enmCpuMode)
13856 {
13857 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13858 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13859 case IEMMODE_16BIT:
13860 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13861 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13862 else
13863 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13864 break;
13865 }
13866 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13867 szInstr, sizeof(szInstr), &cbInstr);
13868 }
13869
13870 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13871 Log2(("**** %s\n"
13872 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13873 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13874 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13875 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13876 " %s\n"
13877 , pszFunction,
13878 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13879 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13880 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13881 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13882 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13883 szInstr));
13884
13885 if (LogIs3Enabled())
13886 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13887 }
13888 else
13889# endif
13890 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13891 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13892 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13893}
13894#endif /* LOG_ENABLED */
13895
13896
13897/**
13898 * Makes status code adjustments (pass up from I/O and access handlers)
13899 * as well as maintaining statistics.
13900 *
13901 * @returns Strict VBox status code to pass up.
13902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13903 * @param rcStrict The status from executing an instruction.
13904 */
13905DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13906{
13907 if (rcStrict != VINF_SUCCESS)
13908 {
13909 if (RT_SUCCESS(rcStrict))
13910 {
13911 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13912 || rcStrict == VINF_IOM_R3_IOPORT_READ
13913 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13914 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13915 || rcStrict == VINF_IOM_R3_MMIO_READ
13916 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13917 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13918 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13919 || rcStrict == VINF_CPUM_R3_MSR_READ
13920 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13921 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13922 || rcStrict == VINF_EM_RAW_TO_R3
13923 || rcStrict == VINF_EM_TRIPLE_FAULT
13924 || rcStrict == VINF_GIM_R3_HYPERCALL
13925 /* raw-mode / virt handlers only: */
13926 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13927 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13928 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13929 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13930 || rcStrict == VINF_SELM_SYNC_GDT
13931 || rcStrict == VINF_CSAM_PENDING_ACTION
13932 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13933 /* nested hw.virt codes: */
13934 || rcStrict == VINF_VMX_VMEXIT
13935 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13936 || rcStrict == VINF_SVM_VMEXIT
13937 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13938/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13939 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13940#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13941 if ( rcStrict == VINF_VMX_VMEXIT
13942 && rcPassUp == VINF_SUCCESS)
13943 rcStrict = VINF_SUCCESS;
13944 else
13945#endif
13946#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13947 if ( rcStrict == VINF_SVM_VMEXIT
13948 && rcPassUp == VINF_SUCCESS)
13949 rcStrict = VINF_SUCCESS;
13950 else
13951#endif
13952 if (rcPassUp == VINF_SUCCESS)
13953 pVCpu->iem.s.cRetInfStatuses++;
13954 else if ( rcPassUp < VINF_EM_FIRST
13955 || rcPassUp > VINF_EM_LAST
13956 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13957 {
13958 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13959 pVCpu->iem.s.cRetPassUpStatus++;
13960 rcStrict = rcPassUp;
13961 }
13962 else
13963 {
13964 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13965 pVCpu->iem.s.cRetInfStatuses++;
13966 }
13967 }
13968 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13969 pVCpu->iem.s.cRetAspectNotImplemented++;
13970 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13971 pVCpu->iem.s.cRetInstrNotImplemented++;
13972 else
13973 pVCpu->iem.s.cRetErrStatuses++;
13974 }
13975 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13976 {
13977 pVCpu->iem.s.cRetPassUpStatus++;
13978 rcStrict = pVCpu->iem.s.rcPassUp;
13979 }
13980
13981 return rcStrict;
13982}
13983
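/*
 * Illustrative sketch (not part of the IEM sources): stripped of the nested
 * hw.virt special cases and the statistics counters, the pass-up decision in
 * iemExecStatusCodeFiddling above reduces to the following.  The names and
 * plain 'int' parameters are simplifications for the example.
 */
#if 0 /* example only, intentionally compiled out */
static int examplePickReturnStatus(int rcStrict, int rcPassUp)
{
    if (RT_FAILURE(rcStrict))
        return rcStrict;                /* real errors are never overridden */
    if (rcPassUp == VINF_SUCCESS)
        return rcStrict;                /* nothing pending, keep the instruction status */
    if (rcStrict == VINF_SUCCESS)
        return rcPassUp;                /* instruction succeeded, deliver the pending status */
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcStrict)
        return rcPassUp;                /* the pending status is more urgent */
    return rcStrict;                    /* otherwise the informational instruction status wins */
}
#endif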
13984
13985/**
13986 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13987 * IEMExecOneWithPrefetchedByPC.
13988 *
13989 * Similar code is found in IEMExecLots.
13990 *
13991 * @return Strict VBox status code.
13992 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13993 * @param fExecuteInhibit If set, execute the instruction following CLI,
13994 * POP SS and MOV SS,GR.
13995 * @param pszFunction The calling function name.
13996 */
13997DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13998{
13999 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14000 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14001 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14002 RT_NOREF_PV(pszFunction);
14003
14004#ifdef IEM_WITH_SETJMP
14005 VBOXSTRICTRC rcStrict;
14006 jmp_buf JmpBuf;
14007 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14008 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14009 if ((rcStrict = setjmp(JmpBuf)) == 0)
14010 {
14011 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14012 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14013 }
14014 else
14015 pVCpu->iem.s.cLongJumps++;
14016 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14017#else
14018 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14019 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14020#endif
14021 if (rcStrict == VINF_SUCCESS)
14022 pVCpu->iem.s.cInstructions++;
14023 if (pVCpu->iem.s.cActiveMappings > 0)
14024 {
14025 Assert(rcStrict != VINF_SUCCESS);
14026 iemMemRollback(pVCpu);
14027 }
14028 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14029 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14030 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14031
14032//#ifdef DEBUG
14033// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14034//#endif
14035
14036#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14037 /*
14038 * Perform any VMX nested-guest instruction boundary actions.
14039 *
14040 * If any of these causes a VM-exit, we must skip executing the next
14041 * instruction (would run into stale page tables). A VM-exit makes sure
14042 * there is no interrupt-inhibition, so that should ensure we don't go
14043 * there is no interrupt-inhibition, so that should ensure we don't go on
14044 * to try executing the next instruction.  Clearing fExecuteInhibit is
14045 */
14046 if ( rcStrict == VINF_SUCCESS
14047 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14048 {
14049 bool fCheckRemainingIntercepts = true;
14050 /* TPR-below threshold/APIC write has the highest priority. */
14051 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14052 {
14053 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14054 fCheckRemainingIntercepts = false;
14055 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14056 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14057 }
14058 /* MTF takes priority over VMX-preemption timer. */
14059 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14060 {
14061 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
14062 fCheckRemainingIntercepts = false;
14063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14065 }
14066 /* VMX preemption timer takes priority over NMI-window exits. */
14067 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14068 {
14069 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14070 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14071 rcStrict = VINF_SUCCESS;
14072 else
14073 {
14074 fCheckRemainingIntercepts = false;
14075 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14076 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14077 }
14078 }
14079
14080 /*
14081 * Check remaining intercepts.
14082 *
14083 * NMI-window and Interrupt-window VM-exits.
14084 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
14085 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
14086 *
14087 * See Intel spec. 26.7.6 "NMI-Window Exiting".
14088 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
14089 */
14090 if ( fCheckRemainingIntercepts
14091 && !TRPMHasTrap(pVCpu)
14092 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
14093 {
14094 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
14095 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
14096 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
14097 {
14098 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
14099 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14100 }
14101 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
14102 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
14103 {
14104 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
14105 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
14106 }
14107 }
14108 }
14109#endif
14110
14111 /* Execute the next instruction as well if a cli, pop ss or
14112 mov ss, Gr has just completed successfully. */
14113 if ( fExecuteInhibit
14114 && rcStrict == VINF_SUCCESS
14115 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14116 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14117 {
14118 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14119 if (rcStrict == VINF_SUCCESS)
14120 {
14121#ifdef LOG_ENABLED
14122 iemLogCurInstr(pVCpu, false, pszFunction);
14123#endif
14124#ifdef IEM_WITH_SETJMP
14125 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14126 if ((rcStrict = setjmp(JmpBuf)) == 0)
14127 {
14128 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14129 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14130 }
14131 else
14132 pVCpu->iem.s.cLongJumps++;
14133 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14134#else
14135 IEM_OPCODE_GET_NEXT_U8(&b);
14136 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14137#endif
14138 if (rcStrict == VINF_SUCCESS)
14139 pVCpu->iem.s.cInstructions++;
14140 if (pVCpu->iem.s.cActiveMappings > 0)
14141 {
14142 Assert(rcStrict != VINF_SUCCESS);
14143 iemMemRollback(pVCpu);
14144 }
14145 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14146 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14147 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14148 }
14149 else if (pVCpu->iem.s.cActiveMappings > 0)
14150 iemMemRollback(pVCpu);
14151 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14152 }
14153
14154 /*
14155 * Return value fiddling, statistics and sanity assertions.
14156 */
14157 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14158
14159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14161 return rcStrict;
14162}
14163
14164
14165#ifdef IN_RC
14166/**
14167 * Re-enters raw-mode or ensures we return to ring-3.
14168 *
14169 * @returns rcStrict, maybe modified.
14170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14171 * @param rcStrict The status code returned by the interpreter.
14172 */
14173DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14174{
14175 if ( !pVCpu->iem.s.fInPatchCode
14176 && ( rcStrict == VINF_SUCCESS
14177 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14178 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14179 {
14180 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14181 CPUMRawEnter(pVCpu);
14182 else
14183 {
14184 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14185 rcStrict = VINF_EM_RESCHEDULE;
14186 }
14187 }
14188 return rcStrict;
14189}
14190#endif
14191
14192
14193/**
14194 * Execute one instruction.
14195 *
14196 * @return Strict VBox status code.
14197 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14198 */
14199VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14200{
14201#ifdef LOG_ENABLED
14202 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14203#endif
14204
14205 /*
14206 * Do the decoding and emulation.
14207 */
14208 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14209 if (rcStrict == VINF_SUCCESS)
14210 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14211 else if (pVCpu->iem.s.cActiveMappings > 0)
14212 iemMemRollback(pVCpu);
14213
14214#ifdef IN_RC
14215 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14216#endif
14217 if (rcStrict != VINF_SUCCESS)
14218 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14219 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14220 return rcStrict;
14221}
14222
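/*
 * Illustrative caller sketch (hypothetical; the real callers live in EM and
 * the execution engines): emulate up to a given number of guest instructions
 * with IEMExecOne above, stopping at the first status that needs attention.
 */
#if 0 /* example only, intentionally compiled out */
static VBOXSTRICTRC exampleEmulateSome(PVMCPU pVCpu, uint32_t cMaxInstructions)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstructions-- > 0 && rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecOne(pVCpu);   /* decodes and executes a single instruction at CS:RIP */
    return rcStrict;
}
#endif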
14223
14224VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14225{
14226 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14227
14228 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14229 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14230 if (rcStrict == VINF_SUCCESS)
14231 {
14232 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14233 if (pcbWritten)
14234 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14235 }
14236 else if (pVCpu->iem.s.cActiveMappings > 0)
14237 iemMemRollback(pVCpu);
14238
14239#ifdef IN_RC
14240 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14241#endif
14242 return rcStrict;
14243}
14244
14245
14246VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14247 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14248{
14249 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14250
14251 VBOXSTRICTRC rcStrict;
14252 if ( cbOpcodeBytes
14253 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14254 {
14255 iemInitDecoder(pVCpu, false);
14256#ifdef IEM_WITH_CODE_TLB
14257 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14258 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14259 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14260 pVCpu->iem.s.offCurInstrStart = 0;
14261 pVCpu->iem.s.offInstrNextByte = 0;
14262#else
14263 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14264 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14265#endif
14266 rcStrict = VINF_SUCCESS;
14267 }
14268 else
14269 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14270 if (rcStrict == VINF_SUCCESS)
14271 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14272 else if (pVCpu->iem.s.cActiveMappings > 0)
14273 iemMemRollback(pVCpu);
14274
14275#ifdef IN_RC
14276 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14277#endif
14278 return rcStrict;
14279}
14280
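/*
 * Illustrative caller sketch (hypothetical values, not part of the IEM
 * sources): if the caller already holds the opcode bytes at the current RIP,
 * handing them to IEMExecOneWithPrefetchedByPC above spares IEM the guest
 * memory access for the opcode prefetch.
 */
#if 0 /* example only, intentionally compiled out */
static VBOXSTRICTRC exampleExecPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    static uint8_t const s_abNop[] = { 0x90 };  /* assumes a NOP sits at the current RIP (example only) */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, pVCpu->cpum.GstCtx.rip, s_abNop, sizeof(s_abNop));
}
#endif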
14281
14282VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14283{
14284 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14285
14286 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14287 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14288 if (rcStrict == VINF_SUCCESS)
14289 {
14290 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14291 if (pcbWritten)
14292 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14293 }
14294 else if (pVCpu->iem.s.cActiveMappings > 0)
14295 iemMemRollback(pVCpu);
14296
14297#ifdef IN_RC
14298 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14299#endif
14300 return rcStrict;
14301}
14302
14303
14304VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14305 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14306{
14307 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14308
14309 VBOXSTRICTRC rcStrict;
14310 if ( cbOpcodeBytes
14311 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14312 {
14313 iemInitDecoder(pVCpu, true);
14314#ifdef IEM_WITH_CODE_TLB
14315 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14316 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14317 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14318 pVCpu->iem.s.offCurInstrStart = 0;
14319 pVCpu->iem.s.offInstrNextByte = 0;
14320#else
14321 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14322 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14323#endif
14324 rcStrict = VINF_SUCCESS;
14325 }
14326 else
14327 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14328 if (rcStrict == VINF_SUCCESS)
14329 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14330 else if (pVCpu->iem.s.cActiveMappings > 0)
14331 iemMemRollback(pVCpu);
14332
14333#ifdef IN_RC
14334 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14335#endif
14336 return rcStrict;
14337}
14338
14339
14340/**
14341 * For debugging DISGetParamSize, this may come in handy.
14342 *
14343 * @returns Strict VBox status code.
14344 * @param pVCpu The cross context virtual CPU structure of the
14345 * calling EMT.
14346 * @param pCtxCore The context core structure.
14347 * @param OpcodeBytesPC The PC of the opcode bytes.
14348 * @param pvOpcodeBytes Prefetched opcode bytes.
14349 * @param cbOpcodeBytes Number of prefetched bytes.
14350 * @param pcbWritten Where to return the number of bytes written.
14351 * Optional.
14352 */
14353VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14354 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14355 uint32_t *pcbWritten)
14356{
14357 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14358
14359 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14360 VBOXSTRICTRC rcStrict;
14361 if ( cbOpcodeBytes
14362 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14363 {
14364 iemInitDecoder(pVCpu, true);
14365#ifdef IEM_WITH_CODE_TLB
14366 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14367 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14368 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14369 pVCpu->iem.s.offCurInstrStart = 0;
14370 pVCpu->iem.s.offInstrNextByte = 0;
14371#else
14372 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14373 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14374#endif
14375 rcStrict = VINF_SUCCESS;
14376 }
14377 else
14378 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14379 if (rcStrict == VINF_SUCCESS)
14380 {
14381 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14382 if (pcbWritten)
14383 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14384 }
14385 else if (pVCpu->iem.s.cActiveMappings > 0)
14386 iemMemRollback(pVCpu);
14387
14388#ifdef IN_RC
14389 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14390#endif
14391 return rcStrict;
14392}
14393
14394
14395VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14396{
14397 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14398 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14399
14400 /*
14401 * See if there is an interrupt pending in TRPM, inject it if we can.
14402 */
14403 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14404#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14405 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14406 if (fIntrEnabled)
14407 {
14408 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14409 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14410 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14411 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14412 else
14413 {
14414 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14415 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14416 }
14417 }
14418#else
14419 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14420#endif
14421
14422 /** @todo What if we are injecting an exception and not an interrupt? Is that
14423 * possible here? */
14424 if ( fIntrEnabled
14425 && TRPMHasTrap(pVCpu)
14426 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14427 {
14428 uint8_t u8TrapNo;
14429 TRPMEVENT enmType;
14430 RTGCUINT uErrCode;
14431 RTGCPTR uCr2;
14432 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14433 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14434 TRPMResetTrap(pVCpu);
14435#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14436 /* Injecting an event may cause a VM-exit. */
14437 if ( rcStrict != VINF_SUCCESS
14438 && rcStrict != VINF_IEM_RAISED_XCPT)
14439 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14440#else
14441 NOREF(rcStrict);
14442#endif
14443 }
14444
14445 /*
14446 * Initial decoder init w/ prefetch, then setup setjmp.
14447 */
14448 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14449 if (rcStrict == VINF_SUCCESS)
14450 {
14451#ifdef IEM_WITH_SETJMP
14452 jmp_buf JmpBuf;
14453 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14454 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14455 pVCpu->iem.s.cActiveMappings = 0;
14456 if ((rcStrict = setjmp(JmpBuf)) == 0)
14457#endif
14458 {
14459 /*
14460 * The run loop.  We limit ourselves to the caller-specified instruction count (cMaxInstructions).
14461 */
14462 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14463 PVM pVM = pVCpu->CTX_SUFF(pVM);
14464 for (;;)
14465 {
14466 /*
14467 * Log the state.
14468 */
14469#ifdef LOG_ENABLED
14470 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14471#endif
14472
14473 /*
14474 * Do the decoding and emulation.
14475 */
14476 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14477 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14478 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14479 {
14480 Assert(pVCpu->iem.s.cActiveMappings == 0);
14481 pVCpu->iem.s.cInstructions++;
14482 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14483 {
14484 uint64_t fCpu = pVCpu->fLocalForcedActions
14485 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14486 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14487 | VMCPU_FF_TLB_FLUSH
14488#ifdef VBOX_WITH_RAW_MODE
14489 | VMCPU_FF_TRPM_SYNC_IDT
14490 | VMCPU_FF_SELM_SYNC_TSS
14491 | VMCPU_FF_SELM_SYNC_GDT
14492 | VMCPU_FF_SELM_SYNC_LDT
14493#endif
14494 | VMCPU_FF_INHIBIT_INTERRUPTS
14495 | VMCPU_FF_BLOCK_NMIS
14496 | VMCPU_FF_UNHALT ));
14497
14498 if (RT_LIKELY( ( !fCpu
14499 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14500 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14501 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14502 {
14503 if (cMaxInstructionsGccStupidity-- > 0)
14504 {
14505                            /* Poll timers every now and then according to the caller's specs. */
14506 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14507 || !TMTimerPollBool(pVM, pVCpu))
14508 {
14509 Assert(pVCpu->iem.s.cActiveMappings == 0);
14510 iemReInitDecoder(pVCpu);
14511 continue;
14512 }
14513 }
14514 }
14515 }
14516 Assert(pVCpu->iem.s.cActiveMappings == 0);
14517 }
14518 else if (pVCpu->iem.s.cActiveMappings > 0)
14519 iemMemRollback(pVCpu);
14520 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14521 break;
14522 }
14523 }
14524#ifdef IEM_WITH_SETJMP
14525 else
14526 {
14527 if (pVCpu->iem.s.cActiveMappings > 0)
14528 iemMemRollback(pVCpu);
14529# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14531# endif
14532 pVCpu->iem.s.cLongJumps++;
14533 }
14534 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14535#endif
14536
14537 /*
14538 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14539 */
14540 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14541 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14542 }
14543 else
14544 {
14545 if (pVCpu->iem.s.cActiveMappings > 0)
14546 iemMemRollback(pVCpu);
14547
14548#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14549 /*
14550 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14551 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14552 */
14553 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14554#endif
14555 }
14556
14557 /*
14558 * Maybe re-enter raw-mode and log.
14559 */
14560#ifdef IN_RC
14561 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14562#endif
14563 if (rcStrict != VINF_SUCCESS)
14564 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14565 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14566 if (pcInstructions)
14567 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14568 return rcStrict;
14569}
14570
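/*
 * Illustrative caller sketch (hypothetical numbers, not part of the IEM
 * sources): cPollRate + 1 must be a power of two (see the assertion at the
 * top of IEMExecLots above), so 0x1ff polls the timers roughly every 512
 * instructions.
 */
#if 0 /* example only, intentionally compiled out */
static VBOXSTRICTRC exampleRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 0x1ff /*cPollRate*/, &cInstructions);
    LogFlow(("exampleRunBatch: executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif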
14571
14572/**
14573 * Interface used by EMExecuteExec, does exit statistics and limits.
14574 *
14575 * @returns Strict VBox status code.
14576 * @param pVCpu The cross context virtual CPU structure.
14577 * @param fWillExit To be defined.
14578 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14579 * @param cMaxInstructions Maximum number of instructions to execute.
14580 * @param cMaxInstructionsWithoutExits
14581 * The max number of instructions without exits.
14582 * @param pStats Where to return statistics.
14583 */
14584VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14585 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14586{
14587    NOREF(fWillExit); /** @todo define flexible exit criteria */
14588
14589 /*
14590 * Initialize return stats.
14591 */
14592 pStats->cInstructions = 0;
14593 pStats->cExits = 0;
14594 pStats->cMaxExitDistance = 0;
14595 pStats->cReserved = 0;
14596
14597 /*
14598 * Initial decoder init w/ prefetch, then setup setjmp.
14599 */
14600 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14601 if (rcStrict == VINF_SUCCESS)
14602 {
14603#ifdef IEM_WITH_SETJMP
14604 jmp_buf JmpBuf;
14605 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14606 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14607 pVCpu->iem.s.cActiveMappings = 0;
14608 if ((rcStrict = setjmp(JmpBuf)) == 0)
14609#endif
14610 {
14611#ifdef IN_RING0
14612 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14613#endif
14614 uint32_t cInstructionSinceLastExit = 0;
14615
14616 /*
14617 * The run loop.  We limit ourselves to the caller-specified instruction count (cMaxInstructions).
14618 */
14619 PVM pVM = pVCpu->CTX_SUFF(pVM);
14620 for (;;)
14621 {
14622 /*
14623 * Log the state.
14624 */
14625#ifdef LOG_ENABLED
14626 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14627#endif
14628
14629 /*
14630 * Do the decoding and emulation.
14631 */
14632 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14633
14634 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14635 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14636
14637 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14638 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14639 {
14640 pStats->cExits += 1;
14641 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14642 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14643 cInstructionSinceLastExit = 0;
14644 }
14645
14646 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14647 {
14648 Assert(pVCpu->iem.s.cActiveMappings == 0);
14649 pVCpu->iem.s.cInstructions++;
14650 pStats->cInstructions++;
14651 cInstructionSinceLastExit++;
14652 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14653 {
14654 uint64_t fCpu = pVCpu->fLocalForcedActions
14655 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14656 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14657 | VMCPU_FF_TLB_FLUSH
14658#ifdef VBOX_WITH_RAW_MODE
14659 | VMCPU_FF_TRPM_SYNC_IDT
14660 | VMCPU_FF_SELM_SYNC_TSS
14661 | VMCPU_FF_SELM_SYNC_GDT
14662 | VMCPU_FF_SELM_SYNC_LDT
14663#endif
14664 | VMCPU_FF_INHIBIT_INTERRUPTS
14665 | VMCPU_FF_BLOCK_NMIS
14666 | VMCPU_FF_UNHALT ));
14667
14668 if (RT_LIKELY( ( ( !fCpu
14669 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14670 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14671 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14672 || pStats->cInstructions < cMinInstructions))
14673 {
14674 if (pStats->cInstructions < cMaxInstructions)
14675 {
14676 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14677 {
14678#ifdef IN_RING0
14679 if ( !fCheckPreemptionPending
14680 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14681#endif
14682 {
14683 Assert(pVCpu->iem.s.cActiveMappings == 0);
14684 iemReInitDecoder(pVCpu);
14685 continue;
14686 }
14687#ifdef IN_RING0
14688 rcStrict = VINF_EM_RAW_INTERRUPT;
14689 break;
14690#endif
14691 }
14692 }
14693 }
14694 Assert(!(fCpu & VMCPU_FF_IEM));
14695 }
14696 Assert(pVCpu->iem.s.cActiveMappings == 0);
14697 }
14698 else if (pVCpu->iem.s.cActiveMappings > 0)
14699 iemMemRollback(pVCpu);
14700 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14701 break;
14702 }
14703 }
14704#ifdef IEM_WITH_SETJMP
14705 else
14706 {
14707 if (pVCpu->iem.s.cActiveMappings > 0)
14708 iemMemRollback(pVCpu);
14709 pVCpu->iem.s.cLongJumps++;
14710 }
14711 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14712#endif
14713
14714 /*
14715 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14716 */
14717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14719 }
14720 else
14721 {
14722 if (pVCpu->iem.s.cActiveMappings > 0)
14723 iemMemRollback(pVCpu);
14724
14725#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14726 /*
14727 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14728 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14729 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14730 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14731#endif
14732 }
14733
14734 /*
14735 * Maybe re-enter raw-mode and log.
14736 */
14737#ifdef IN_RC
14738 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14739#endif
14740 if (rcStrict != VINF_SUCCESS)
14741 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14742 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14743 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14744 return rcStrict;
14745}
14746
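/*
 * Usage sketch (illustrative only, not part of the build): a caller such as
 * EM could drive IEMExecForExits with execution limits and then inspect the
 * returned statistics.  The limit values below are made-up examples; only the
 * function signature and the IEMEXECFOREXITSTATS fields come from the code
 * above.
 *
 * @code
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
 *                                              0,      // fWillExit - not defined yet
 *                                              8,      // cMinInstructions
 *                                              4096,   // cMaxInstructions
 *                                              512,    // cMaxInstructionsWithoutExits
 *                                              &Stats);
 *      LogFlow(("IEMExecForExits: ins=%u exits=%u maxdist=%u -> %Rrc\n",
 *               Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance,
 *               VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */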
14747
14748/**
14749 * Injects a trap, fault, abort, software interrupt or external interrupt.
14750 *
14751 * The parameter list matches TRPMQueryTrapAll pretty closely.
14752 *
14753 * @returns Strict VBox status code.
14754 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14755 * @param u8TrapNo The trap number.
14756 * @param enmType What type is it (trap/fault/abort), software
14757 * interrupt or hardware interrupt.
14758 * @param uErrCode The error code if applicable.
14759 * @param uCr2 The CR2 value if applicable.
14760 * @param cbInstr The instruction length (only relevant for
14761 * software interrupts).
14762 */
14763VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14764 uint8_t cbInstr)
14765{
14766 iemInitDecoder(pVCpu, false);
14767#ifdef DBGFTRACE_ENABLED
14768 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14769 u8TrapNo, enmType, uErrCode, uCr2);
14770#endif
14771
14772 uint32_t fFlags;
14773 switch (enmType)
14774 {
14775 case TRPM_HARDWARE_INT:
14776 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14777 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14778 uErrCode = uCr2 = 0;
14779 break;
14780
14781 case TRPM_SOFTWARE_INT:
14782 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14783 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14784 uErrCode = uCr2 = 0;
14785 break;
14786
14787 case TRPM_TRAP:
14788 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14789 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14790 if (u8TrapNo == X86_XCPT_PF)
14791 fFlags |= IEM_XCPT_FLAGS_CR2;
14792 switch (u8TrapNo)
14793 {
14794 case X86_XCPT_DF:
14795 case X86_XCPT_TS:
14796 case X86_XCPT_NP:
14797 case X86_XCPT_SS:
14798 case X86_XCPT_PF:
14799 case X86_XCPT_AC:
14800 fFlags |= IEM_XCPT_FLAGS_ERR;
14801 break;
14802 }
14803 break;
14804
14805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14806 }
14807
14808 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14809
14810 if (pVCpu->iem.s.cActiveMappings > 0)
14811 iemMemRollback(pVCpu);
14812
14813 return rcStrict;
14814}
14815
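/*
 * Usage sketch (illustrative only, not part of the build): injecting a page
 * fault through IEMInjectTrap.  The error code and CR2 value are hypothetical
 * examples; the #PF flag handling (IEM_XCPT_FLAGS_ERR + IEM_XCPT_FLAGS_CR2)
 * follows the switch in the function above.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu,
 *                                            X86_XCPT_PF,        // u8TrapNo
 *                                            TRPM_TRAP,          // enmType
 *                                            2,                  // uErrCode: write to a not-present page (example)
 *                                            GCPtrFaultAddress,  // uCr2 (hypothetical variable)
 *                                            0);                 // cbInstr: only used for software interrupts
 * @endcode
 */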
14816
14817/**
14818 * Injects the active TRPM event.
14819 *
14820 * @returns Strict VBox status code.
14821 * @param pVCpu The cross context virtual CPU structure.
14822 */
14823VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14824{
14825#ifndef IEM_IMPLEMENTS_TASKSWITCH
14826 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14827#else
14828 uint8_t u8TrapNo;
14829 TRPMEVENT enmType;
14830 RTGCUINT uErrCode;
14831 RTGCUINTPTR uCr2;
14832 uint8_t cbInstr;
14833 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14834 if (RT_FAILURE(rc))
14835 return rc;
14836
14837 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14838#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14839 if (rcStrict == VINF_SVM_VMEXIT)
14840 rcStrict = VINF_SUCCESS;
14841#endif
14842#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14843 if (rcStrict == VINF_VMX_VMEXIT)
14844 rcStrict = VINF_SUCCESS;
14845#endif
14846 /** @todo Are there any other codes that imply the event was successfully
14847 * delivered to the guest? See @bugref{6607}. */
14848 if ( rcStrict == VINF_SUCCESS
14849 || rcStrict == VINF_IEM_RAISED_XCPT)
14850 TRPMResetTrap(pVCpu);
14851
14852 return rcStrict;
14853#endif
14854}
14855
14856
14857VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14858{
14859 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14860 return VERR_NOT_IMPLEMENTED;
14861}
14862
14863
14864VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14865{
14866 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14867 return VERR_NOT_IMPLEMENTED;
14868}
14869
14870
14871#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14872/**
14873 * Executes an IRET instruction with default operand size.
14874 *
14875 * This is for PATM.
14876 *
14877 * @returns VBox status code.
14878 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14879 * @param pCtxCore The register frame.
14880 */
14881VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14882{
14883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14884
14885 iemCtxCoreToCtx(pCtx, pCtxCore);
14886 iemInitDecoder(pVCpu);
14887 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14888 if (rcStrict == VINF_SUCCESS)
14889 iemCtxToCtxCore(pCtxCore, pCtx);
14890 else
14891 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14892 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14893 return rcStrict;
14894}
14895#endif
14896
14897
14898/**
14899 * Macro used by the IEMExec* method to check the given instruction length.
14900 * Macro used by the IEMExec* methods to check the given instruction length.
14901 * Will return on failure!
14902 *
14903 * @param a_cbInstr The given instruction length.
14904 * @param a_cbMin The minimum length.
14905 */
14906#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14907 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14908 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14909
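/*
 * Added note on the range check above: the single unsigned comparison is
 * equivalent to checking a_cbMin <= a_cbInstr <= 15, because a value below
 * a_cbMin wraps around to a huge unsigned number (15 being the architectural
 * maximum x86 instruction length).  Worked example with a_cbMin = 2:
 *      a_cbInstr = 1   ->  (unsigned)(1 - 2) = 0xffffffff  >  13  ->  assert
 *      a_cbInstr = 2   ->   0 <= 13  ->  ok
 *      a_cbInstr = 15  ->  13 <= 13  ->  ok
 *      a_cbInstr = 16  ->  14 >  13  ->  assert
 */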
14910
14911/**
14912 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14913 *
14914 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14915 *
14916 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14918 * @param rcStrict The status code to fiddle.
14919 */
14920DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14921{
14922 iemUninitExec(pVCpu);
14923#ifdef IN_RC
14924 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14925#else
14926 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14927#endif
14928}
14929
14930
14931/**
14932 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14933 *
14934 * This API ASSUMES that the caller has already verified that the guest code is
14935 * allowed to access the I/O port. (The I/O port is in the DX register in the
14936 * guest state.)
14937 *
14938 * @returns Strict VBox status code.
14939 * @param pVCpu The cross context virtual CPU structure.
14940 * @param cbValue The size of the I/O port access (1, 2, or 4).
14941 * @param enmAddrMode The addressing mode.
14942 * @param fRepPrefix Indicates whether a repeat prefix is used
14943 * (doesn't matter which for this instruction).
14944 * @param cbInstr The instruction length in bytes.
14945 * @param iEffSeg The effective segment address.
14946 * @param fIoChecked Whether the access to the I/O port has been
14947 * checked or not. It's typically checked in the
14948 * HM scenario.
14949 */
14950VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14951 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14952{
14953 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14954 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14955
14956 /*
14957 * State init.
14958 */
14959 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14960
14961 /*
14962 * Switch orgy for getting to the right handler.
14963 */
14964 VBOXSTRICTRC rcStrict;
14965 if (fRepPrefix)
14966 {
14967 switch (enmAddrMode)
14968 {
14969 case IEMMODE_16BIT:
14970 switch (cbValue)
14971 {
14972 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14973 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14975 default:
14976 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14977 }
14978 break;
14979
14980 case IEMMODE_32BIT:
14981 switch (cbValue)
14982 {
14983 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14984 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14985 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14986 default:
14987 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14988 }
14989 break;
14990
14991 case IEMMODE_64BIT:
14992 switch (cbValue)
14993 {
14994 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14995 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14996 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14997 default:
14998 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14999 }
15000 break;
15001
15002 default:
15003 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15004 }
15005 }
15006 else
15007 {
15008 switch (enmAddrMode)
15009 {
15010 case IEMMODE_16BIT:
15011 switch (cbValue)
15012 {
15013 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15014 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15015 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15016 default:
15017 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15018 }
15019 break;
15020
15021 case IEMMODE_32BIT:
15022 switch (cbValue)
15023 {
15024 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15025 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15026 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15027 default:
15028 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15029 }
15030 break;
15031
15032 case IEMMODE_64BIT:
15033 switch (cbValue)
15034 {
15035 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15036 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15037 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15038 default:
15039 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15040 }
15041 break;
15042
15043 default:
15044 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15045 }
15046 }
15047
15048 if (pVCpu->iem.s.cActiveMappings)
15049 iemMemRollback(pVCpu);
15050
15051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15052}
15053
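/*
 * Usage sketch (illustrative only, not part of the build): how an HM exit
 * handler might hand an intercepted REP OUTSB over to IEM.  The addressing
 * mode and instruction length are hypothetical; the parameters map straight
 * onto the switch above.  IEMExecStringIoRead below is used the same way for
 * the INS variants, just without the segment parameter.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,             // cbValue: byte-sized OUTSB
 *                                                   IEMMODE_32BIT, // enmAddrMode
 *                                                   true,          // fRepPrefix
 *                                                   cbInstr,       // from the exit information
 *                                                   X86_SREG_DS,   // iEffSeg: no override assumed
 *                                                   true);         // fIoChecked already done by HM
 * @endcode
 */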
15054
15055/**
15056 * Interface for HM and EM for executing string I/O IN (read) instructions.
15057 *
15058 * This API ASSUMES that the caller has already verified that the guest code is
15059 * allowed to access the I/O port. (The I/O port is in the DX register in the
15060 * guest state.)
15061 *
15062 * @returns Strict VBox status code.
15063 * @param pVCpu The cross context virtual CPU structure.
15064 * @param cbValue The size of the I/O port access (1, 2, or 4).
15065 * @param enmAddrMode The addressing mode.
15066 * @param fRepPrefix Indicates whether a repeat prefix is used
15067 * (doesn't matter which for this instruction).
15068 * @param cbInstr The instruction length in bytes.
15069 * @param fIoChecked Whether the access to the I/O port has been
15070 * checked or not. It's typically checked in the
15071 * HM scenario.
15072 */
15073VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15074 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15077
15078 /*
15079 * State init.
15080 */
15081 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15082
15083 /*
15084 * Switch orgy for getting to the right handler.
15085 */
15086 VBOXSTRICTRC rcStrict;
15087 if (fRepPrefix)
15088 {
15089 switch (enmAddrMode)
15090 {
15091 case IEMMODE_16BIT:
15092 switch (cbValue)
15093 {
15094 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15095 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15096 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15097 default:
15098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15099 }
15100 break;
15101
15102 case IEMMODE_32BIT:
15103 switch (cbValue)
15104 {
15105 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15106 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15107 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15108 default:
15109 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15110 }
15111 break;
15112
15113 case IEMMODE_64BIT:
15114 switch (cbValue)
15115 {
15116 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15117 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15118 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15119 default:
15120 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15121 }
15122 break;
15123
15124 default:
15125 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15126 }
15127 }
15128 else
15129 {
15130 switch (enmAddrMode)
15131 {
15132 case IEMMODE_16BIT:
15133 switch (cbValue)
15134 {
15135 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15136 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15137 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15138 default:
15139 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15140 }
15141 break;
15142
15143 case IEMMODE_32BIT:
15144 switch (cbValue)
15145 {
15146 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15147 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15148 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15149 default:
15150 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15151 }
15152 break;
15153
15154 case IEMMODE_64BIT:
15155 switch (cbValue)
15156 {
15157 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15158 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15159 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15160 default:
15161 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15162 }
15163 break;
15164
15165 default:
15166 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15167 }
15168 }
15169
15170 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15171 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15172}
15173
15174
15175/**
15176 * Interface for rawmode to execute an OUT (write) instruction.
15177 *
15178 * @returns Strict VBox status code.
15179 * @param pVCpu The cross context virtual CPU structure.
15180 * @param cbInstr The instruction length in bytes.
15181 * @param u16Port The port to write to.
15182 * @param fImm Whether the port is specified using an immediate operand or
15183 * using the implicit DX register.
15184 * @param cbReg The register size.
15185 *
15186 * @remarks In ring-0 not all of the state needs to be synced in.
15187 */
15188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15189{
15190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15191 Assert(cbReg <= 4 && cbReg != 3);
15192
15193 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15194 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15195 Assert(!pVCpu->iem.s.cActiveMappings);
15196 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15197}
15198
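/*
 * Usage sketch (illustrative only, not part of the build): emulating an
 * intercepted "out dx, al" through the decoded-OUT interface.  The one byte
 * instruction length matches the plain 0xEE encoding and is just an example;
 * IEMExecDecodedIn below mirrors this for IN.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu,
 *                                                1,        // cbInstr: "out dx, al" is one byte
 *                                                u16Port,  // supplied by the caller (guest DX)
 *                                                false,    // fImm: port comes from DX, not an immediate
 *                                                1);       // cbReg: 8-bit access
 * @endcode
 */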
15199
15200/**
15201 * Interface for rawmode to execute an IN (read) instruction.
15202 *
15203 * @returns Strict VBox status code.
15204 * @param pVCpu The cross context virtual CPU structure.
15205 * @param cbInstr The instruction length in bytes.
15206 * @param u16Port The port to read.
15207 * @param fImm Whether the port is specified using an immediate operand or
15208 * using the implicit DX.
15209 * @param cbReg The register size.
15210 */
15211VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15212{
15213 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15214 Assert(cbReg <= 4 && cbReg != 3);
15215
15216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15218 Assert(!pVCpu->iem.s.cActiveMappings);
15219 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15220}
15221
15222
15223/**
15224 * Interface for HM and EM to write to a CRx register.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure.
15228 * @param cbInstr The instruction length in bytes.
15229 * @param iCrReg The control register number (destination).
15230 * @param iGReg The general purpose register number (source).
15231 *
15232 * @remarks In ring-0 not all of the state needs to be synced in.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15237 Assert(iCrReg < 16);
15238 Assert(iGReg < 16);
15239
15240 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15241 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15242 Assert(!pVCpu->iem.s.cActiveMappings);
15243 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15244}
15245
15246
15247/**
15248 * Interface for HM and EM to read from a CRx register.
15249 *
15250 * @returns Strict VBox status code.
15251 * @param pVCpu The cross context virtual CPU structure.
15252 * @param cbInstr The instruction length in bytes.
15253 * @param iGReg The general purpose register number (destination).
15254 * @param iCrReg The control register number (source).
15255 *
15256 * @remarks In ring-0 not all of the state needs to be synced in.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15262 | CPUMCTX_EXTRN_APIC_TPR);
15263 Assert(iCrReg < 16);
15264 Assert(iGReg < 16);
15265
15266 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15268 Assert(!pVCpu->iem.s.cActiveMappings);
15269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15270}
15271
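/*
 * Usage sketch (illustrative only, not part of the build): emulating an
 * intercepted "mov cr4, rax" and "mov rax, cr4" pair with the CRx interfaces
 * above.  The 3-byte length corresponds to the 0F 22 /r and 0F 20 /r
 * encodings without prefixes and is only an example; register indexes are
 * given as plain numbers (0 = RAX).
 *
 * @code
 *      // mov cr4, rax: iCrReg = 4 (destination), iGReg = 0 (source)
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 4, 0);
 *
 *      // mov rax, cr4: iGReg = 0 (destination), iCrReg = 4 (source)
 *      rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3, 0, 4);
 * @endcode
 */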
15272
15273/**
15274 * Interface for HM and EM to clear the CR0[TS] bit.
15275 *
15276 * @returns Strict VBox status code.
15277 * @param pVCpu The cross context virtual CPU structure.
15278 * @param cbInstr The instruction length in bytes.
15279 *
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15285
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15288 Assert(!pVCpu->iem.s.cActiveMappings);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15295 *
15296 * @returns Strict VBox status code.
15297 * @param pVCpu The cross context virtual CPU structure.
15298 * @param cbInstr The instruction length in bytes.
15299 * @param uValue The value to load into CR0.
15300 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15301 * memory operand. Otherwise pass NIL_RTGCPTR.
15302 *
15303 * @remarks In ring-0 not all of the state needs to be synced in.
15304 */
15305VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15306{
15307 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15308
15309 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15310 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15311 Assert(!pVCpu->iem.s.cActiveMappings);
15312 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15313}
15314
15315
15316/**
15317 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15318 *
15319 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15320 *
15321 * @returns Strict VBox status code.
15322 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15323 * @param cbInstr The instruction length in bytes.
15324 * @remarks In ring-0 not all of the state needs to be synced in.
15325 * @thread EMT(pVCpu)
15326 */
15327VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15328{
15329 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15330
15331 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15332 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15333 Assert(!pVCpu->iem.s.cActiveMappings);
15334 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15335}
15336
15337
15338/**
15339 * Interface for HM and EM to emulate the WBINVD instruction.
15340 *
15341 * @returns Strict VBox status code.
15342 * @param pVCpu The cross context virtual CPU structure.
15343 * @param cbInstr The instruction length in bytes.
15344 *
15345 * @remarks In ring-0 not all of the state needs to be synced in.
15346 */
15347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15348{
15349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15350
15351 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15352 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15353 Assert(!pVCpu->iem.s.cActiveMappings);
15354 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15355}
15356
15357
15358/**
15359 * Interface for HM and EM to emulate the INVD instruction.
15360 *
15361 * @returns Strict VBox status code.
15362 * @param pVCpu The cross context virtual CPU structure.
15363 * @param cbInstr The instruction length in bytes.
15364 *
15365 * @remarks In ring-0 not all of the state needs to be synced in.
15366 */
15367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15368{
15369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15370
15371 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15372 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15373 Assert(!pVCpu->iem.s.cActiveMappings);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the INVLPG instruction.
15380 *
15381 * @returns Strict VBox status code.
15382 * @retval VINF_PGM_SYNC_CR3
15383 *
15384 * @param pVCpu The cross context virtual CPU structure.
15385 * @param cbInstr The instruction length in bytes.
15386 * @param GCPtrPage The effective address of the page to invalidate.
15387 *
15388 * @remarks In ring-0 not all of the state needs to be synced in.
15389 */
15390VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15391{
15392 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15393
15394 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15395 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15396 Assert(!pVCpu->iem.s.cActiveMappings);
15397 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15398}
15399
15400
15401/**
15402 * Interface for HM and EM to emulate the CPUID instruction.
15403 *
15404 * @returns Strict VBox status code.
15405 *
15406 * @param pVCpu The cross context virtual CPU structure.
15407 * @param cbInstr The instruction length in bytes.
15408 *
15409 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15414 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15415
15416 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15417 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15418 Assert(!pVCpu->iem.s.cActiveMappings);
15419 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15420}
15421
15422
15423/**
15424 * Interface for HM and EM to emulate the RDPMC instruction.
15425 *
15426 * @returns Strict VBox status code.
15427 *
15428 * @param pVCpu The cross context virtual CPU structure.
15429 * @param cbInstr The instruction length in bytes.
15430 *
15431 * @remarks Not all of the state needs to be synced in.
15432 */
15433VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15434{
15435 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15436 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the RDTSC instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15450 *
15451 * @param pVCpu The cross context virtual CPU structure.
15452 * @param cbInstr The instruction length in bytes.
15453 *
15454 * @remarks Not all of the state needs to be synced in.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15459 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15460
15461 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15463 Assert(!pVCpu->iem.s.cActiveMappings);
15464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15465}
15466
15467
15468/**
15469 * Interface for HM and EM to emulate the RDTSCP instruction.
15470 *
15471 * @returns Strict VBox status code.
15472 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15473 *
15474 * @param pVCpu The cross context virtual CPU structure.
15475 * @param cbInstr The instruction length in bytes.
15476 *
15477 * @remarks Not all of the state needs to be synced in. Recommended
15478 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15479 */
15480VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15481{
15482 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15483 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15484
15485 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15487 Assert(!pVCpu->iem.s.cActiveMappings);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the RDMSR instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15497 *
15498 * @param pVCpu The cross context virtual CPU structure.
15499 * @param cbInstr The instruction length in bytes.
15500 *
15501 * @remarks Not all of the state needs to be synced in. Requires RCX and
15502 * (currently) all MSRs.
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15508
15509 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15510 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15511 Assert(!pVCpu->iem.s.cActiveMappings);
15512 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15513}
15514
15515
15516/**
15517 * Interface for HM and EM to emulate the WRMSR instruction.
15518 *
15519 * @returns Strict VBox status code.
15520 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15521 *
15522 * @param pVCpu The cross context virtual CPU structure.
15523 * @param cbInstr The instruction length in bytes.
15524 *
15525 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15526 * and (currently) all MSRs.
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15529{
15530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15531 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15532 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15533
15534 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15535 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15536 Assert(!pVCpu->iem.s.cActiveMappings);
15537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15538}
15539
15540
15541/**
15542 * Interface for HM and EM to emulate the MONITOR instruction.
15543 *
15544 * @returns Strict VBox status code.
15545 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15546 *
15547 * @param pVCpu The cross context virtual CPU structure.
15548 * @param cbInstr The instruction length in bytes.
15549 *
15550 * @remarks Not all of the state needs to be synced in.
15551 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15552 * are used.
15553 */
15554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15555{
15556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15557 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15558
15559 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15560 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15561 Assert(!pVCpu->iem.s.cActiveMappings);
15562 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15563}
15564
15565
15566/**
15567 * Interface for HM and EM to emulate the MWAIT instruction.
15568 *
15569 * @returns Strict VBox status code.
15570 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15571 *
15572 * @param pVCpu The cross context virtual CPU structure.
15573 * @param cbInstr The instruction length in bytes.
15574 *
15575 * @remarks Not all of the state needs to be synced in.
15576 */
15577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15578{
15579 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15580 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15581
15582 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15583 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15584 Assert(!pVCpu->iem.s.cActiveMappings);
15585 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15586}
15587
15588
15589/**
15590 * Interface for HM and EM to emulate the HLT instruction.
15591 *
15592 * @returns Strict VBox status code.
15593 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15594 *
15595 * @param pVCpu The cross context virtual CPU structure.
15596 * @param cbInstr The instruction length in bytes.
15597 *
15598 * @remarks Not all of the state needs to be synced in.
15599 */
15600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15601{
15602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15603
15604 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15606 Assert(!pVCpu->iem.s.cActiveMappings);
15607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15608}
15609
15610
15611/**
15612 * Checks if IEM is in the process of delivering an event (interrupt or
15613 * exception).
15614 *
15615 * @returns true if we're in the process of raising an interrupt or exception,
15616 * false otherwise.
15617 * @param pVCpu The cross context virtual CPU structure.
15618 * @param puVector Where to store the vector associated with the
15619 * currently delivered event, optional.
15620 * @param pfFlags Where to store the event delivery flags (see
15621 * IEM_XCPT_FLAGS_XXX), optional.
15622 * @param puErr Where to store the error code associated with the
15623 * event, optional.
15624 * @param puCr2 Where to store the CR2 associated with the event,
15625 * optional.
15626 * @remarks The caller should check the flags to determine if the error code and
15627 * CR2 are valid for the event.
15628 */
15629VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15630{
15631 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15632 if (fRaisingXcpt)
15633 {
15634 if (puVector)
15635 *puVector = pVCpu->iem.s.uCurXcpt;
15636 if (pfFlags)
15637 *pfFlags = pVCpu->iem.s.fCurXcpt;
15638 if (puErr)
15639 *puErr = pVCpu->iem.s.uCurXcptErr;
15640 if (puCr2)
15641 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15642 }
15643 return fRaisingXcpt;
15644}
15645
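/*
 * Usage sketch (illustrative only, not part of the build): querying the event
 * IEM is currently delivering and, per the @remarks above, only trusting the
 * error code and CR2 when the corresponding IEM_XCPT_FLAGS_XXX bits are set.
 *
 * @code
 *      uint8_t  uVector;
 *      uint32_t fFlags;
 *      uint32_t uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          Log(("IEM is delivering vector %#x\n", uVector));
 *          if (fFlags & IEM_XCPT_FLAGS_ERR)
 *              Log(("  error code %#x\n", uErr));
 *          if (fFlags & IEM_XCPT_FLAGS_CR2)
 *              Log(("  CR2 %#RX64\n", uCr2));
 *      }
 * @endcode
 */
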
15646#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15647
15648/**
15649 * Interface for HM and EM to emulate the CLGI instruction.
15650 *
15651 * @returns Strict VBox status code.
15652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15653 * @param cbInstr The instruction length in bytes.
15654 * @thread EMT(pVCpu)
15655 */
15656VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15657{
15658 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15659
15660 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15661 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15662 Assert(!pVCpu->iem.s.cActiveMappings);
15663 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15664}
15665
15666
15667/**
15668 * Interface for HM and EM to emulate the STGI instruction.
15669 *
15670 * @returns Strict VBox status code.
15671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15672 * @param cbInstr The instruction length in bytes.
15673 * @thread EMT(pVCpu)
15674 */
15675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15676{
15677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15678
15679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15681 Assert(!pVCpu->iem.s.cActiveMappings);
15682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15683}
15684
15685
15686/**
15687 * Interface for HM and EM to emulate the VMLOAD instruction.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param cbInstr The instruction length in bytes.
15692 * @thread EMT(pVCpu)
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15695{
15696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15697
15698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15699 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15700 Assert(!pVCpu->iem.s.cActiveMappings);
15701 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15702}
15703
15704
15705/**
15706 * Interface for HM and EM to emulate the VMSAVE instruction.
15707 *
15708 * @returns Strict VBox status code.
15709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15710 * @param cbInstr The instruction length in bytes.
15711 * @thread EMT(pVCpu)
15712 */
15713VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15714{
15715 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15716
15717 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15718 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15719 Assert(!pVCpu->iem.s.cActiveMappings);
15720 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15721}
15722
15723
15724/**
15725 * Interface for HM and EM to emulate the INVLPGA instruction.
15726 *
15727 * @returns Strict VBox status code.
15728 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15729 * @param cbInstr The instruction length in bytes.
15730 * @thread EMT(pVCpu)
15731 */
15732VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15733{
15734 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15735
15736 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15737 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15738 Assert(!pVCpu->iem.s.cActiveMappings);
15739 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15740}
15741
15742
15743/**
15744 * Interface for HM and EM to emulate the VMRUN instruction.
15745 *
15746 * @returns Strict VBox status code.
15747 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15748 * @param cbInstr The instruction length in bytes.
15749 * @thread EMT(pVCpu)
15750 */
15751VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15752{
15753 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15755
15756 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15757 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15758 Assert(!pVCpu->iem.s.cActiveMappings);
15759 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15760}
15761
15762
15763/**
15764 * Interface for HM and EM to emulate \#VMEXIT.
15765 *
15766 * @returns Strict VBox status code.
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @param uExitCode The exit code.
15769 * @param uExitInfo1 The exit info. 1 field.
15770 * @param uExitInfo2 The exit info. 2 field.
15771 * @thread EMT(pVCpu)
15772 */
15773VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15774{
15775 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15776 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15777 if (pVCpu->iem.s.cActiveMappings)
15778 iemMemRollback(pVCpu);
15779 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15780}
15781
15782#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15783
15784#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15785
15786/**
15787 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15788 *
15789 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15790 * are performed. Bounds checks are done in strict builds only.
15791 *
15792 * @param pVmcs Pointer to the virtual VMCS.
15793 * @param u64VmcsField The VMCS field.
15794 * @param pu64Dst Where to store the VMCS value.
15795 *
15796 * @remarks May be called with interrupts disabled.
15797 * @todo This should probably be moved to CPUM someday.
15798 */
15799VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15800{
15801 AssertPtr(pVmcs);
15802 AssertPtr(pu64Dst);
15803 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15804}
15805
15806
15807/**
15808 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15809 *
15810 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15811 * are performed. Bounds checks are done in strict builds only.
15812 *
15813 * @param pVmcs Pointer to the virtual VMCS.
15814 * @param u64VmcsField The VMCS field.
15815 * @param u64Val The value to write.
15816 *
15817 * @remarks May be called with interrupts disabled.
15818 * @todo This should probably be moved to CPUM someday.
15819 */
15820VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15821{
15822 AssertPtr(pVmcs);
15823 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15824}
15825
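/*
 * Usage sketch (illustrative only, not part of the build): reading and
 * modifying a nested-guest VMCS field with the helpers above.  0x6800 is the
 * guest CR0 field encoding per the Intel SDM; real callers would use their
 * VMCS field constant instead of a raw number, and pVmcs must point at the
 * current virtual VMCS.
 *
 * @code
 *      uint64_t u64GuestCr0;
 *      IEMReadVmxVmcsField(pVmcs, 0x6800, &u64GuestCr0);   // read guest CR0
 *      u64GuestCr0 |= X86_CR0_NE;                          // tweak it
 *      IEMWriteVmxVmcsField(pVmcs, 0x6800, u64GuestCr0);   // write it back
 * @endcode
 */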
15826
15827/**
15828 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15829 *
15830 * @returns Strict VBox status code.
15831 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15832 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15833 * the x2APIC device.
15834 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15835 *
15836 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15837 * @param idMsr The MSR being read.
15838 * @param pu64Value Pointer to the value being written or where to store the
15839 * value being read.
15840 * @param fWrite Whether this is an MSR write or read access.
15841 * @thread EMT(pVCpu)
15842 */
15843VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15844{
15845 Assert(pu64Value);
15846
15847 VBOXSTRICTRC rcStrict;
15848 if (fWrite)
15849 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15850 else
15851 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15852 Assert(!pVCpu->iem.s.cActiveMappings);
15853 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15855}
15856
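/*
 * Usage sketch (illustrative only, not part of the build): letting IEM try to
 * virtualize an x2APIC TPR read (MSR 0x808 per the SDM; shown as a raw number
 * here) and reacting to the documented return codes.
 *
 * @code
 *      uint64_t u64Tpr = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, 0x808, &u64Tpr, false);
 *      if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
 *          ;   // u64Tpr now holds the virtualized value
 *      else if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
 *          ;   // forward the access to the regular x2APIC device
 *      else
 *          ;   // out of range: raise #GP(0) to the guest
 * @endcode
 */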
15857
15858/**
15859 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15860 *
15861 * @returns Strict VBox status code.
15862 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15863 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15864 *
15865 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15866 * @param pExitInfo Pointer to the VM-exit information.
15867 * @param pExitEventInfo Pointer to the VM-exit event information.
15868 * @thread EMT(pVCpu)
15869 */
15870VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15871{
15872 Assert(pExitInfo);
15873 Assert(pExitEventInfo);
15874 Assert(pExitInfo->uReason == VMX_EXIT_APIC_ACCESS);
15875 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15876 Assert(!pVCpu->iem.s.cActiveMappings);
15877 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15879}
15880
15881
15882/**
15883 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15884 * VM-exit.
15885 *
15886 * @returns Strict VBox status code.
15887 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15888 * @thread EMT(pVCpu)
15889 */
15890VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15891{
15892 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15893 Assert(!pVCpu->iem.s.cActiveMappings);
15894 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15895}
15896
15897
15898/**
15899 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15900 *
15901 * @returns Strict VBox status code.
15902 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15903 * @thread EMT(pVCpu)
15904 */
15905VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15906{
15907 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15908 Assert(!pVCpu->iem.s.cActiveMappings);
15909 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15910}
15911
15912
15913/**
15914 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15915 *
15916 * @returns Strict VBox status code.
15917 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15918 * @param uVector The external interrupt vector (pass 0 if the external
15919 * interrupt is still pending).
15920 * @param fIntPending Whether the external interrupt is pending or
15921 * acknowledged in the interrupt controller.
15922 * @thread EMT(pVCpu)
15923 */
15924VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15925{
15926 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15927 Assert(!pVCpu->iem.s.cActiveMappings);
15928 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15929}
15930
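/*
 * Usage sketch (illustrative only, not part of the build): the two ways the
 * external interrupt VM-exit interface above is meant to be called, following
 * its parameter documentation.
 *
 * @code
 *      // Interrupt still pending in the interrupt controller: the vector is
 *      // ignored, so pass 0.
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0, true);
 *
 *      // Interrupt already acknowledged: pass the vector obtained from the
 *      // PIC/APIC so it can be reflected in the VM-exit information.
 *      rcStrict = IEMExecVmxVmexitExtInt(pVCpu, uVector, false);
 * @endcode
 */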
15931
15932/**
15933 * Interface for HM and EM to emulate VM-exit due to exceptions.
15934 *
15935 * Exception includes NMIs, software exceptions (those generated by INT3 or
15936 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15937 *
15938 * @returns Strict VBox status code.
15939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15940 * @param pExitInfo Pointer to the VM-exit information.
15941 * @param pExitEventInfo Pointer to the VM-exit event information.
15942 * @thread EMT(pVCpu)
15943 */
15944VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15945{
15946 Assert(pExitInfo);
15947 Assert(pExitEventInfo);
15948 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
15949 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15950 Assert(!pVCpu->iem.s.cActiveMappings);
15951 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15952}
15953
15954
15955/**
15956 * Interface for HM and EM to emulate VM-exit due to NMIs.
15957 *
15958 * @returns Strict VBox status code.
15959 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15960 * @thread EMT(pVCpu)
15961 */
15962VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPU pVCpu)
15963{
15964 VMXVEXITINFO ExitInfo;
15965 RT_ZERO(ExitInfo);
15966 VMXVEXITEVENTINFO ExitEventInfo;
15967 RT_ZERO(ExitEventInfo);
15968 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15969 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15970 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15971
15972 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15973 Assert(!pVCpu->iem.s.cActiveMappings);
15974 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15975}
15976
15977
15978/**
15979 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15980 *
15981 * @returns Strict VBox status code.
15982 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15983 * @thread EMT(pVCpu)
15984 */
15985VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPU pVCpu)
15986{
15987 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15988 Assert(!pVCpu->iem.s.cActiveMappings);
15989 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15990}
15991
15992
15993/**
15994 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15995 *
15996 * @returns Strict VBox status code.
15997 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15998 * @param uVector The SIPI vector.
15999 * @thread EMT(pVCpu)
16000 */
16001VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
16002{
16003 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
16004 Assert(!pVCpu->iem.s.cActiveMappings);
16005 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16006}
16007
16008
16009/**
16010 * Interface for HM and EM to emulate a VM-exit.
16011 *
16012 * If a specialized version of a VM-exit handler exists, that must be used instead.
16013 *
16014 * @returns Strict VBox status code.
16015 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16016 * @param uExitReason The VM-exit reason.
16017 * @param u64ExitQual The Exit qualification.
16018 * @thread EMT(pVCpu)
16019 */
16020VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
16021{
16022 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
16023 Assert(!pVCpu->iem.s.cActiveMappings);
16024 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16025}
16026
16027
16028/**
16029 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16030 *
16031 * This is meant to be used for those instructions for which VMX provides additional
16032 * decoding information beyond just the instruction length!
16033 *
16034 * @returns Strict VBox status code.
16035 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16036 * @param pExitInfo Pointer to the VM-exit information.
16037 * @thread EMT(pVCpu)
16038 */
16039VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16040{
16041 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
16042 Assert(!pVCpu->iem.s.cActiveMappings);
16043 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16044}
16045
16046
16047/**
16048 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16049 *
16050 * This is meant to be used for those instructions for which VMX provides only the
16051 * instruction length.
16052 *
16053 * @returns Strict VBox status code.
16054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16055 * @param uExitReason The VM-exit reason.
16056 * @param cbInstr The instruction length in bytes.
16057 * @thread EMT(pVCpu)
16058 */
16059VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16060{
16061 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16062 Assert(!pVCpu->iem.s.cActiveMappings);
16063 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16064}
16065
16066
16067/**
16068 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16069 *
16070 * @returns Strict VBox status code.
16071 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16072 * @param pExitInfo Pointer to the VM-exit information.
16073 * @param pExitEventInfo Pointer to the VM-exit event information.
16074 * @thread EMT(pVCpu)
16075 */
16076VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16077{
16078 Assert(pExitInfo);
16079 Assert(pExitEventInfo);
16080 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16081 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16082 Assert(!pVCpu->iem.s.cActiveMappings);
16083 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16084}
16085
16086
16087/**
16088 * Interface for HM and EM to emulate the VMREAD instruction.
16089 *
16090 * @returns Strict VBox status code.
16091 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16092 * @param pExitInfo Pointer to the VM-exit information.
16093 * @thread EMT(pVCpu)
16094 */
16095VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16096{
16097 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16098 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16099 Assert(pExitInfo);
16100
16101 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16102
16103 VBOXSTRICTRC rcStrict;
16104 uint8_t const cbInstr = pExitInfo->cbInstr;
16105 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16106 uint64_t const u64FieldEnc = fIs64BitMode
16107 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16108 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16109 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16110 {
16111 if (fIs64BitMode)
16112 {
16113 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16114 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16115 }
16116 else
16117 {
16118 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16119 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16120 }
16121 }
16122 else
16123 {
16124 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16125 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16126 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16127 }
16128 Assert(!pVCpu->iem.s.cActiveMappings);
16129 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16130}
16131
16132
16133/**
16134 * Interface for HM and EM to emulate the VMWRITE instruction.
16135 *
16136 * @returns Strict VBox status code.
16137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16138 * @param pExitInfo Pointer to the VM-exit information.
16139 * @thread EMT(pVCpu)
16140 */
16141VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16142{
16143 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16144 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16145 Assert(pExitInfo);
16146
16147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16148
16149 uint64_t u64Val;
16150 uint8_t iEffSeg;
16151 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16152 {
16153 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16154 iEffSeg = UINT8_MAX;
16155 }
16156 else
16157 {
16158 u64Val = pExitInfo->GCPtrEffAddr;
16159 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16160 }
16161 uint8_t const cbInstr = pExitInfo->cbInstr;
16162 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16163 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16164 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16165 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16166 Assert(!pVCpu->iem.s.cActiveMappings);
16167 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16168}
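
/*
 * Illustrative only: the memory-operand counterpart for IEMExecDecodedVmwrite above.
 * With fIsRegOperand clear, the value to write is read from guest memory at
 * iSegReg:GCPtrEffAddr while iReg2 still names the register holding the VMCS field
 * encoding.  GCPtrSrc and the other concrete values are placeholders.
 *
 * @code
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.cbInstr                               = 4;              // placeholder length
 *     ExitInfo.GCPtrEffAddr                          = GCPtrSrc;       // guest-linear source address
 *     ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 0;
 *     ExitInfo.InstrInfo.VmreadVmwrite.iSegReg       = X86_SREG_DS;
 *     ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = X86_GREG_xCX;   // holds the VMCS field encoding
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
 * @endcode
 */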
16169
16170
16171/**
16172 * Interface for HM and EM to emulate the VMPTRLD instruction.
16173 *
16174 * @returns Strict VBox status code.
16175 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16176 * @param pExitInfo Pointer to the VM-exit information.
16177 * @thread EMT(pVCpu)
16178 */
16179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16180{
16181 Assert(pExitInfo);
16182 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16183 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16184
16185 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16186
16187 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16188 uint8_t const cbInstr = pExitInfo->cbInstr;
16189 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16190 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16191 Assert(!pVCpu->iem.s.cActiveMappings);
16192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16193}
16194
16195
16196/**
16197 * Interface for HM and EM to emulate the VMPTRST instruction.
16198 *
16199 * @returns Strict VBox status code.
16200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16201 * @param pExitInfo Pointer to the VM-exit information.
16202 * @thread EMT(pVCpu)
16203 */
16204VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16205{
16206 Assert(pExitInfo);
16207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16209
16210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16211
16212 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16213 uint8_t const cbInstr = pExitInfo->cbInstr;
16214 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16215 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16216 Assert(!pVCpu->iem.s.cActiveMappings);
16217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16218}
16219
16220
16221/**
16222 * Interface for HM and EM to emulate the VMCLEAR instruction.
16223 *
16224 * @returns Strict VBox status code.
16225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16226 * @param pExitInfo Pointer to the VM-exit information.
16227 * @thread EMT(pVCpu)
16228 */
16229VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16230{
16231 Assert(pExitInfo);
16232 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16233 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16234
16235 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16236
16237 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16238 uint8_t const cbInstr = pExitInfo->cbInstr;
16239 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16240 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16241 Assert(!pVCpu->iem.s.cActiveMappings);
16242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16243}
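
/*
 * Illustrative only: VMPTRLD, VMPTRST and VMCLEAR above (and VMXON further down) all
 * consume the same decoded state, namely the instruction length, the effective segment
 * in InstrInfo.VmxXsave.iSegReg and the guest-linear operand address in GCPtrEffAddr,
 * so a hypothetical caller can reuse one setup pattern for the whole family:
 *
 * @code
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.cbInstr                    = 4;                 // placeholder length
 *     ExitInfo.GCPtrEffAddr               = GCPtrVmcsOperand;  // placeholder m64 operand address
 *     ExitInfo.InstrInfo.VmxXsave.iSegReg = X86_SREG_DS;
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo); // or ...Vmptrld / ...Vmptrst / ...Vmxon
 * @endcode
 */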
16244
16245
16246/**
16247 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16248 *
16249 * @returns Strict VBox status code.
16250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16251 * @param cbInstr The instruction length in bytes.
16252 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16253 * VMXINSTRID_VMRESUME).
16254 * @thread EMT(pVCpu)
16255 */
16256VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16257{
16258 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16259 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16260
16261 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16262 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16263 Assert(!pVCpu->iem.s.cActiveMappings);
16264 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16265}
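
/*
 * Illustrative only: unlike its siblings, the VMLAUNCH/VMRESUME interface above takes
 * no exit information structure, just the instruction length and which of the two
 * instructions was decoded:
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3, VMXINSTRID_VMRESUME); // cbInstr=3 is a placeholder
 * @endcode
 */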
16266
16267
16268/**
16269 * Interface for HM and EM to emulate the VMXON instruction.
16270 *
16271 * @returns Strict VBox status code.
16272 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16273 * @param pExitInfo Pointer to the VM-exit information.
16274 * @thread EMT(pVCpu)
16275 */
16276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16277{
16278 Assert(pExitInfo);
16279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16280 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16281
16282 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16283
16284 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16285 uint8_t const cbInstr = pExitInfo->cbInstr;
16286 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16287 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16288 Assert(!pVCpu->iem.s.cActiveMappings);
16289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16290}
16291
16292
16293/**
16294 * Interface for HM and EM to emulate the VMXOFF instruction.
16295 *
16296 * @returns Strict VBox status code.
16297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16298 * @param cbInstr The instruction length in bytes.
16299 * @thread EMT(pVCpu)
16300 */
16301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16302{
16303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16304 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16305
16306 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16308 Assert(!pVCpu->iem.s.cActiveMappings);
16309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16310}
16311
16312
16313/**
16314 * Interface for HM and EM to emulate the INVVPID instruction.
16315 *
16316 * @returns Strict VBox status code.
16317 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16318 * @param pExitInfo Pointer to the VM-exit information.
16319 * @thread EMT(pVCpu)
16320 */
16321VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16322{
16323 Assert(pExitInfo);
16324 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16325 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16326
16327 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16328
16329 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16330 uint8_t const cbInstr = pExitInfo->cbInstr;
16331 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16332 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16333 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16334 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16335 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16336 Assert(!pVCpu->iem.s.cActiveMappings);
16337 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16338}
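
/*
 * Illustrative only: the INVVPID wrapper above expects the descriptor address via
 * Inv.iSegReg and GCPtrEffAddr and the invalidation type in the register named by
 * Inv.iReg2.  All concrete values below are placeholders.
 *
 * @code
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.cbInstr               = 5;                  // placeholder length
 *     ExitInfo.GCPtrEffAddr          = GCPtrInvvpidDesc;   // placeholder descriptor address
 *     ExitInfo.InstrInfo.Inv.iSegReg = X86_SREG_DS;
 *     ExitInfo.InstrInfo.Inv.iReg2   = X86_GREG_xCX;       // holds the INVVPID type
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
 * @endcode
 */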
16339
16340
16341/**
16342 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16343 *
16344 * @remarks The @a pvUser argument is currently unused.
16345 */
16346PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16347 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16348 PGMACCESSORIGIN enmOrigin, void *pvUser)
16349{
16350 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16351
16352 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16353 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16354 {
16355 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16356 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16357
16358 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16359 * Currently they will go through as read accesses. */
16360 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16361 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16362 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16363 if (RT_FAILURE(rcStrict))
16364 return rcStrict;
16365
16366 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16367 return VINF_SUCCESS;
16368 }
16369
16370 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16371 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16372 if (RT_FAILURE(rc))
16373 return rc;
16374
16375 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16376 return VINF_PGM_HANDLER_DO_DEFAULT;
16377}
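
/*
 * Return-code contract of iemVmxApicAccessPageHandler above: while the guest is in VMX
 * non-root mode the access is virtualized by IEM and VINF_SUCCESS tells the caller not
 * to touch the page itself; otherwise the handler deregisters itself and returns
 * VINF_PGM_HANDLER_DO_DEFAULT so PGM carries out the access as ordinary memory.
 */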
16378
16379#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16380
16381#ifdef IN_RING3
16382
16383/**
16384 * Handles the unlikely and probably fatal merge cases.
16385 *
16386 * @returns Merged status code.
16387 * @param rcStrict Current EM status code.
16388 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16389 * with @a rcStrict.
16390 * @param iMemMap The memory mapping index. For error reporting only.
16391 * @param pVCpu The cross context virtual CPU structure of the calling
16392 * thread, for error reporting only.
16393 */
16394DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16395 unsigned iMemMap, PVMCPU pVCpu)
16396{
16397 if (RT_FAILURE_NP(rcStrict))
16398 return rcStrict;
16399
16400 if (RT_FAILURE_NP(rcStrictCommit))
16401 return rcStrictCommit;
16402
16403 if (rcStrict == rcStrictCommit)
16404 return rcStrictCommit;
16405
16406 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16407 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16408 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16411 return VERR_IOM_FF_STATUS_IPE;
16412}
16413
16414
16415/**
16416 * Helper for IOMR3ProcessForceFlag.
16417 *
16418 * @returns Merged status code.
16419 * @param rcStrict Current EM status code.
16420 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16421 * with @a rcStrict.
16422 * @param iMemMap The memory mapping index. For error reporting only.
16423 * @param pVCpu The cross context virtual CPU structure of the calling
16424 * thread, for error reporting only.
16425 */
16426DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16427{
16428 /* Simple. */
16429 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16430 return rcStrictCommit;
16431
16432 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16433 return rcStrict;
16434
16435 /* EM scheduling status codes. */
16436 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16437 && rcStrict <= VINF_EM_LAST))
16438 {
16439 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16440 && rcStrictCommit <= VINF_EM_LAST))
16441 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16442 }
16443
16444 /* Unlikely */
16445 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16446}
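
/*
 * Worked examples for iemR3MergeStatus above (both follow directly from the code): a
 * VINF_SUCCESS or VINF_EM_RAW_TO_R3 EM status yields the commit status unchanged, and
 * when both are EM scheduling codes the numerically smaller one wins.
 *
 * @code
 *     iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, 0, pVCpu);  // -> VINF_EM_RAW_TO_R3
 *     iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      0, pVCpu);  // -> VINF_SUCCESS
 * @endcode
 */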
16447
16448
16449/**
16450 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16451 *
16452 * @returns Merge between @a rcStrict and what the commit operation returned.
16453 * @param pVM The cross context VM structure.
16454 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16455 * @param rcStrict The status code returned by ring-0 or raw-mode.
16456 */
16457VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16458{
16459 /*
16460 * Reset the pending commit.
16461 */
16462 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16463 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16464 ("%#x %#x %#x\n",
16465 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16466 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16467
16468 /*
16469 * Commit the pending bounce buffers (usually just one).
16470 */
16471 unsigned cBufs = 0;
16472 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16473 while (iMemMap-- > 0)
16474 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16475 {
16476 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16477 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16478 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16479
16480 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16481 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16482 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16483
16484 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16485 {
16486 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16488 pbBuf,
16489 cbFirst,
16490 PGMACCESSORIGIN_IEM);
16491 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16492 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16493 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16494 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16495 }
16496
16497 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16498 {
16499 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16501 pbBuf + cbFirst,
16502 cbSecond,
16503 PGMACCESSORIGIN_IEM);
16504 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16505 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16506 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16507 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16508 }
16509 cBufs++;
16510 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16511 }
16512
16513 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16514 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16515 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16516 pVCpu->iem.s.cActiveMappings = 0;
16517 return rcStrict;
16518}
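
/*
 * Illustrative only: the typical shape of the force-flag handling code mentioned in the
 * function comment above, i.e. a hypothetical ring-3 caller that notices VMCPU_FF_IEM
 * and lets IEM commit the pending bounce-buffer writes while merging status codes:
 *
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */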
16519
16520#endif /* IN_RING3 */
16521