VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 77612

Last change on this file since 77612 was 77612, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Fix uninitialized case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 647.0 KB
1/* $Id: IEMAll.cpp 77612 2019-03-08 10:39:40Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
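/*
 * Illustrative sketch (not part of the original file): with LOG_GROUP set to
 * LOG_GROUP_IEM as done below, the level mapping listed above means statements
 * like the following land in the corresponding buckets. The messages and the
 * variables used in them are made-up stand-ins.
 */
#if 0 /* example only, never compiled */
Log(("IEM: raising #GP(0) while decoding\n"));           /* level 1: major events */
Log4(("decode - mov eax, ebx @ %RGv\n", GCPtrPC));       /* level 4: mnemonics w/ EIP */
Log8(("IEM WR %RGp LB %#x\n", GCPhys, cbToWrite));       /* level 8: memory writes */
#endif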
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
240
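/*
 * Illustrative sketch (not part of the original file) of the general idea
 * behind IEM_WITH_SETJMP: rather than checking a status code after every
 * helper call, a raise helper can longjmp straight back to the instruction
 * loop. The jump buffer, helper and plain-int status values below are
 * simplified stand-ins, not the actual IEM plumbing.
 */
#if 0 /* example only, never compiled */
# include <setjmp.h>

static jmp_buf g_ExampleJmpBuf;              /* stand-in for a per-VCpu jump buffer */

static void exampleRaiseFault(int rcFault)   /* stand-in for an iemRaiseXxxJmp helper */
{
    longjmp(g_ExampleJmpBuf, rcFault);       /* never returns */
}

static int exampleRunOneInstruction(void)
{
    int rc = setjmp(g_ExampleJmpBuf);        /* 0 on the direct call, rcFault after a longjmp */
    if (rc == 0)
    {
        /* ... decode and execute; any fault helper longjmps out with its status ... */
    }
    return rc;
}
#endif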
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
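/*
 * Illustrative sketch (not part of the original file): typical use of the
 * default-case macros in a switch that covers every enum value. The function
 * is hypothetical; the RET2 variant is used here because the return type is
 * not a status code.
 */
#if 0 /* example only, never compiled */
static uint8_t iemExampleOpSizeInBytes(IEMMODE enmOpSize)
{
    switch (enmOpSize)
    {
        case IEMMODE_16BIT: return 2;
        case IEMMODE_32BIT: return 4;
        case IEMMODE_64BIT: return 8;
        IEM_NOT_REACHED_DEFAULT_CASE_RET2(UINT8_MAX); /* keeps GCC from warning, asserts if ever hit */
    }
}
#endif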
248/**
249 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
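/*
 * Illustrative sketch (not part of the original file): how FNIEMOP_DEF and
 * FNIEMOP_CALL are meant to pair up. The decoder name is hypothetical; real
 * decoders live in the IEMAllInstructions*.cpp.h templates.
 */
#if 0 /* example only, never compiled */
/** Hypothetical decoder declared with FNIEMOP_DEF so the calling convention
 *  and nothrow attributes stay in one place. */
FNIEMOP_DEF(iemOp_ExampleStub)
{
    /* A real decoder would fetch its operands and run a microcode block here. */
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
}

/* A dispatcher invokes it through the matching call macro, e.g.:
     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleStub);
   so that parameters can be added or removed in one place. */
#endif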
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
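/*
 * Illustrative sketch (not part of the original file): the difference the
 * IEM_USE_UNALIGNED_DATA_ACCESS switch makes when, say, a 32-bit value is
 * fetched from a byte buffer. The buffer and offset names are stand-ins.
 */
#if 0 /* example only, never compiled */
uint32_t u32Value;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
u32Value = *(uint32_t const *)&pbBuf[offBuf];                         /* x86/AMD64 tolerate unaligned loads */
# else
u32Value = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],     pbBuf[offBuf + 1],
                               pbBuf[offBuf + 2], pbBuf[offBuf + 3]); /* elaborate byte assembly */
# endif
#endif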
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles the SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
742
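/*
 * Illustrative sketch (not part of the original file): a group-1 decoder picks
 * its worker set from the table above using the reg field of the ModR/M byte
 * (bits 5:3), since /0../7 map to ADD, OR, ADC, SBB, AND, SUB, XOR and CMP.
 * The surrounding decoder context is omitted.
 */
#if 0 /* example only, never compiled */
/* uint8_t bRm = ...;  ModR/M byte already fetched by the decoder */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
/* The entry's normal or locked worker for the effective operand size is then
   invoked, the locked one only when a LOCK prefix is present and legal. */
#endif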
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
991IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
992IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
993IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
994IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
995#endif
996
997#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
998IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
999IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1000#endif
1001
1002
1003/**
1004 * Sets the pass up status.
1005 *
1006 * @returns VINF_SUCCESS.
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param rcPassUp The pass up status. Must be informational.
1010 * VINF_SUCCESS is not allowed.
1011 */
1012IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1013{
1014 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1015
1016 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1017 if (rcOldPassUp == VINF_SUCCESS)
1018 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1019 /* If both are EM scheduling codes, use EM priority rules. */
1020 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1021 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1022 {
1023 if (rcPassUp < rcOldPassUp)
1024 {
1025 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1026 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1027 }
1028 else
1029 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1030 }
1031 /* Override EM scheduling with specific status code. */
1032 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1033 {
1034 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1035 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1036 }
1037 /* Don't override specific status code, first come first served. */
1038 else
1039 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1040 return VINF_SUCCESS;
1041}
1042
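/*
 * Illustrative note (not part of the original file): with the rules above, a
 * second call whose status is also in the VINF_EM_FIRST..VINF_EM_LAST range
 * only replaces the pending one when it is numerically lower (EM treats lower
 * values as higher priority), an EM code always gives way to a specific
 * informational code, and otherwise the first status to arrive is kept.
 */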
1043
1044/**
1045 * Calculates the CPU mode.
1046 *
1047 * This is mainly for updating IEMCPU::enmCpuMode.
1048 *
1049 * @returns CPU mode.
1050 * @param pVCpu The cross context virtual CPU structure of the
1051 * calling thread.
1052 */
1053DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1054{
1055 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1056 return IEMMODE_64BIT;
1057 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1058 return IEMMODE_32BIT;
1059 return IEMMODE_16BIT;
1060}
1061
1062
1063/**
1064 * Initializes the execution state.
1065 *
1066 * @param pVCpu The cross context virtual CPU structure of the
1067 * calling thread.
1068 * @param fBypassHandlers Whether to bypass access handlers.
1069 *
1070 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1071 * side-effects in strict builds.
1072 */
1073DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1074{
1075 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1076 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1077
1078#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1087#endif
1088
1089#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1090 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1091#endif
1092 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1093 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1094#ifdef VBOX_STRICT
1095 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1096 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1097 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1098 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1099 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1100 pVCpu->iem.s.uRexReg = 127;
1101 pVCpu->iem.s.uRexB = 127;
1102 pVCpu->iem.s.offModRm = 127;
1103 pVCpu->iem.s.uRexIndex = 127;
1104 pVCpu->iem.s.iEffSeg = 127;
1105 pVCpu->iem.s.idxPrefix = 127;
1106 pVCpu->iem.s.uVex3rdReg = 127;
1107 pVCpu->iem.s.uVexLength = 127;
1108 pVCpu->iem.s.fEvexStuff = 127;
1109 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1110# ifdef IEM_WITH_CODE_TLB
1111 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1112 pVCpu->iem.s.pbInstrBuf = NULL;
1113 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1114 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1115 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1116 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1117# else
1118 pVCpu->iem.s.offOpcode = 127;
1119 pVCpu->iem.s.cbOpcode = 127;
1120# endif
1121#endif
1122
1123 pVCpu->iem.s.cActiveMappings = 0;
1124 pVCpu->iem.s.iNextMapping = 0;
1125 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1126 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1127#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1128 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1129 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1130 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1131 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1132 if (!pVCpu->iem.s.fInPatchCode)
1133 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1134#endif
1135}
1136
1137#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1138/**
1139 * Performs a minimal reinitialization of the execution state.
1140 *
1141 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1142 * 'world-switch' type operations on the CPU. Currently only nested
1143 * hardware-virtualization uses it.
1144 *
1145 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1146 */
1147IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1148{
1149 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1150 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1151
1152 pVCpu->iem.s.uCpl = uCpl;
1153 pVCpu->iem.s.enmCpuMode = enmMode;
1154 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1155 pVCpu->iem.s.enmEffAddrMode = enmMode;
1156 if (enmMode != IEMMODE_64BIT)
1157 {
1158 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1159 pVCpu->iem.s.enmEffOpSize = enmMode;
1160 }
1161 else
1162 {
1163 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1164 pVCpu->iem.s.enmEffOpSize = enmMode;
1165 }
1166 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1167#ifndef IEM_WITH_CODE_TLB
1168 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1169 pVCpu->iem.s.offOpcode = 0;
1170 pVCpu->iem.s.cbOpcode = 0;
1171#endif
1172 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1173}
1174#endif
1175
1176/**
1177 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1178 *
1179 * @param pVCpu The cross context virtual CPU structure of the
1180 * calling thread.
1181 */
1182DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1183{
1184 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1185#ifdef VBOX_STRICT
1186# ifdef IEM_WITH_CODE_TLB
1187 NOREF(pVCpu);
1188# else
1189 pVCpu->iem.s.cbOpcode = 0;
1190# endif
1191#else
1192 NOREF(pVCpu);
1193#endif
1194}
1195
1196
1197/**
1198 * Initializes the decoder state.
1199 *
1200 * iemReInitDecoder is mostly a copy of this function.
1201 *
1202 * @param pVCpu The cross context virtual CPU structure of the
1203 * calling thread.
1204 * @param fBypassHandlers Whether to bypass access handlers.
1205 */
1206DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1207{
1208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1209 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1210
1211#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1220#endif
1221
1222#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1223 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1224#endif
1225 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1226 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1227 pVCpu->iem.s.enmCpuMode = enmMode;
1228 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1229 pVCpu->iem.s.enmEffAddrMode = enmMode;
1230 if (enmMode != IEMMODE_64BIT)
1231 {
1232 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1233 pVCpu->iem.s.enmEffOpSize = enmMode;
1234 }
1235 else
1236 {
1237 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1238 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1239 }
1240 pVCpu->iem.s.fPrefixes = 0;
1241 pVCpu->iem.s.uRexReg = 0;
1242 pVCpu->iem.s.uRexB = 0;
1243 pVCpu->iem.s.uRexIndex = 0;
1244 pVCpu->iem.s.idxPrefix = 0;
1245 pVCpu->iem.s.uVex3rdReg = 0;
1246 pVCpu->iem.s.uVexLength = 0;
1247 pVCpu->iem.s.fEvexStuff = 0;
1248 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1249#ifdef IEM_WITH_CODE_TLB
1250 pVCpu->iem.s.pbInstrBuf = NULL;
1251 pVCpu->iem.s.offInstrNextByte = 0;
1252 pVCpu->iem.s.offCurInstrStart = 0;
1253# ifdef VBOX_STRICT
1254 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1255 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1256 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1257# endif
1258#else
1259 pVCpu->iem.s.offOpcode = 0;
1260 pVCpu->iem.s.cbOpcode = 0;
1261#endif
1262 pVCpu->iem.s.offModRm = 0;
1263 pVCpu->iem.s.cActiveMappings = 0;
1264 pVCpu->iem.s.iNextMapping = 0;
1265 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1266 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1267#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1268 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1269 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1270 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1271 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1272 if (!pVCpu->iem.s.fInPatchCode)
1273 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1274#endif
1275
1276#ifdef DBGFTRACE_ENABLED
1277 switch (enmMode)
1278 {
1279 case IEMMODE_64BIT:
1280 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1281 break;
1282 case IEMMODE_32BIT:
1283 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1284 break;
1285 case IEMMODE_16BIT:
1286 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1287 break;
1288 }
1289#endif
1290}
1291
1292
1293/**
1294 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1295 *
1296 * This is mostly a copy of iemInitDecoder.
1297 *
1298 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1299 */
1300DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1301{
1302 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1303
1304#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1313#endif
1314
1315 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1316 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1317 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1318 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1319 pVCpu->iem.s.enmEffAddrMode = enmMode;
1320 if (enmMode != IEMMODE_64BIT)
1321 {
1322 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1323 pVCpu->iem.s.enmEffOpSize = enmMode;
1324 }
1325 else
1326 {
1327 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1328 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1329 }
1330 pVCpu->iem.s.fPrefixes = 0;
1331 pVCpu->iem.s.uRexReg = 0;
1332 pVCpu->iem.s.uRexB = 0;
1333 pVCpu->iem.s.uRexIndex = 0;
1334 pVCpu->iem.s.idxPrefix = 0;
1335 pVCpu->iem.s.uVex3rdReg = 0;
1336 pVCpu->iem.s.uVexLength = 0;
1337 pVCpu->iem.s.fEvexStuff = 0;
1338 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1339#ifdef IEM_WITH_CODE_TLB
1340 if (pVCpu->iem.s.pbInstrBuf)
1341 {
1342 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1343 - pVCpu->iem.s.uInstrBufPc;
1344 if (off < pVCpu->iem.s.cbInstrBufTotal)
1345 {
1346 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1347 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1348 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1349 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1350 else
1351 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1352 }
1353 else
1354 {
1355 pVCpu->iem.s.pbInstrBuf = NULL;
1356 pVCpu->iem.s.offInstrNextByte = 0;
1357 pVCpu->iem.s.offCurInstrStart = 0;
1358 pVCpu->iem.s.cbInstrBuf = 0;
1359 pVCpu->iem.s.cbInstrBufTotal = 0;
1360 }
1361 }
1362 else
1363 {
1364 pVCpu->iem.s.offInstrNextByte = 0;
1365 pVCpu->iem.s.offCurInstrStart = 0;
1366 pVCpu->iem.s.cbInstrBuf = 0;
1367 pVCpu->iem.s.cbInstrBufTotal = 0;
1368 }
1369#else
1370 pVCpu->iem.s.cbOpcode = 0;
1371 pVCpu->iem.s.offOpcode = 0;
1372#endif
1373 pVCpu->iem.s.offModRm = 0;
1374 Assert(pVCpu->iem.s.cActiveMappings == 0);
1375 pVCpu->iem.s.iNextMapping = 0;
1376 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1377 Assert(pVCpu->iem.s.fBypassHandlers == false);
1378#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1379 if (!pVCpu->iem.s.fInPatchCode)
1380 { /* likely */ }
1381 else
1382 {
1383 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1384 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1385 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1386 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1387 if (!pVCpu->iem.s.fInPatchCode)
1388 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1389 }
1390#endif
1391
1392#ifdef DBGFTRACE_ENABLED
1393 switch (enmMode)
1394 {
1395 case IEMMODE_64BIT:
1396 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1397 break;
1398 case IEMMODE_32BIT:
1399 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1400 break;
1401 case IEMMODE_16BIT:
1402 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1403 break;
1404 }
1405#endif
1406}
1407
1408
1409
1410/**
1411 * Prefetches opcodes the first time, when starting execution.
1412 *
1413 * @returns Strict VBox status code.
1414 * @param pVCpu The cross context virtual CPU structure of the
1415 * calling thread.
1416 * @param fBypassHandlers Whether to bypass access handlers.
1417 */
1418IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1419{
1420 iemInitDecoder(pVCpu, fBypassHandlers);
1421
1422#ifdef IEM_WITH_CODE_TLB
1423 /** @todo Do ITLB lookup here. */
1424
1425#else /* !IEM_WITH_CODE_TLB */
1426
1427 /*
1428 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1429 *
1430 * First translate CS:rIP to a physical address.
1431 */
1432 uint32_t cbToTryRead;
1433 RTGCPTR GCPtrPC;
1434 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1435 {
1436 cbToTryRead = PAGE_SIZE;
1437 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1438 if (IEM_IS_CANONICAL(GCPtrPC))
1439 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1440 else
1441 return iemRaiseGeneralProtectionFault0(pVCpu);
1442 }
1443 else
1444 {
1445 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1446 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1447 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1448 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1449 else
1450 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1451 if (cbToTryRead) { /* likely */ }
1452 else /* overflowed */
1453 {
1454 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1455 cbToTryRead = UINT32_MAX;
1456 }
1457 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1458 Assert(GCPtrPC <= UINT32_MAX);
1459 }
1460
1461# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1462 /* Allow interpretation of patch manager code blocks since they can for
1463 instance throw #PFs for perfectly good reasons. */
1464 if (pVCpu->iem.s.fInPatchCode)
1465 {
1466 size_t cbRead = 0;
1467 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1468 AssertRCReturn(rc, rc);
1469 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1470 return VINF_SUCCESS;
1471 }
1472# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1473
1474 RTGCPHYS GCPhys;
1475 uint64_t fFlags;
1476 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1477 if (RT_SUCCESS(rc)) { /* probable */ }
1478 else
1479 {
1480 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1481 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1482 }
1483 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1484 else
1485 {
1486 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1487 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1488 }
1489 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1493 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1494 }
1495 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1496 /** @todo Check reserved bits and such stuff. PGM is better at doing
1497 * that, so do it when implementing the guest virtual address
1498 * TLB... */
1499
1500 /*
1501 * Read the bytes at this address.
1502 */
1503 PVM pVM = pVCpu->CTX_SUFF(pVM);
1504# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1505 size_t cbActual;
1506 if ( PATMIsEnabled(pVM)
1507 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1508 {
1509 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1510 Assert(cbActual > 0);
1511 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1512 }
1513 else
1514# endif
1515 {
1516 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1517 if (cbToTryRead > cbLeftOnPage)
1518 cbToTryRead = cbLeftOnPage;
1519 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1520 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1521
1522 if (!pVCpu->iem.s.fBypassHandlers)
1523 {
1524 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1525 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1526 { /* likely */ }
1527 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1528 {
1529 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1530 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1531 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1532 }
1533 else
1534 {
1535 Log((RT_SUCCESS(rcStrict)
1536 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1537 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1538 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1539 return rcStrict;
1540 }
1541 }
1542 else
1543 {
1544 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1545 if (RT_SUCCESS(rc))
1546 { /* likely */ }
1547 else
1548 {
1549 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1550 GCPtrPC, GCPhys, cbToTryRead, rc));
1551 return rc;
1552 }
1553 }
1554 pVCpu->iem.s.cbOpcode = cbToTryRead;
1555 }
1556#endif /* !IEM_WITH_CODE_TLB */
1557 return VINF_SUCCESS;
1558}
1559
1560
1561/**
1562 * Invalidates the IEM TLBs.
1563 *
1564 * This is called internally as well as by PGM when moving GC mappings.
1565 *
1566 *
1567 * @param pVCpu The cross context virtual CPU structure of the calling
1568 * thread.
1569 * @param fVmm Set when PGM calls us with a remapping.
1570 */
1571VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1572{
1573#ifdef IEM_WITH_CODE_TLB
1574 pVCpu->iem.s.cbInstrBufTotal = 0;
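    /* Invalidation works by bumping the revision that gets or'ed into every tag:
       entries tagged with the old revision can no longer match.  Only when the
       revision counter wraps around to zero do the tags have to be scrubbed by
       hand (the rare path below). */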
1575 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1576 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1577 { /* very likely */ }
1578 else
1579 {
1580 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1581 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1582 while (i-- > 0)
1583 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1584 }
1585#endif
1586
1587#ifdef IEM_WITH_DATA_TLB
1588 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1589 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1590 { /* very likely */ }
1591 else
1592 {
1593 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1594 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1595 while (i-- > 0)
1596 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1597 }
1598#endif
1599 NOREF(pVCpu); NOREF(fVmm);
1600}
1601
1602
1603/**
1604 * Invalidates a page in the TLBs.
1605 *
1606 * @param pVCpu The cross context virtual CPU structure of the calling
1607 * thread.
1608 * @param GCPtr The address of the page to invalidate.
1609 */
1610VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1611{
1612#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1613 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1614 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1615 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
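    /* Both TLBs are direct mapped with 256 entries: the low 8 bits of the page
       number select the slot, while the tag is the page number or'ed with the
       current revision (see IEMTlbInvalidateAll). */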
1616 uintptr_t idx = (uint8_t)GCPtr;
1617
1618# ifdef IEM_WITH_CODE_TLB
1619 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1620 {
1621 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1622 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1623 pVCpu->iem.s.cbInstrBufTotal = 0;
1624 }
1625# endif
1626
1627# ifdef IEM_WITH_DATA_TLB
1628 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1629 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1630# endif
1631#else
1632 NOREF(pVCpu); NOREF(GCPtr);
1633#endif
1634}
1635
1636
1637/**
1638 * Invalidates the host physical aspects of the IEM TLBs.
1639 *
1640 * This is called internally as well as by PGM when moving GC mappings.
1641 *
1642 * @param pVCpu The cross context virtual CPU structure of the calling
1643 * thread.
1644 */
1645VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1646{
1647#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1648 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1649
1650# ifdef IEM_WITH_CODE_TLB
1651 pVCpu->iem.s.cbInstrBufTotal = 0;
1652# endif
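    /* Same idea as the virtual revision: bumping the physical revision makes all
       existing entries look stale with respect to their host mappings; only on
       wrap-around do the entries have to be scrubbed explicitly. */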
1653 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1654 if (uTlbPhysRev != 0)
1655 {
1656 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1657 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1658 }
1659 else
1660 {
1661 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1662 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1663
1664 unsigned i;
1665# ifdef IEM_WITH_CODE_TLB
1666 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1667 while (i-- > 0)
1668 {
1669 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1670 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1671 }
1672# endif
1673# ifdef IEM_WITH_DATA_TLB
1674 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1675 while (i-- > 0)
1676 {
1677 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1678 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1679 }
1680# endif
1681 }
1682#else
1683 NOREF(pVCpu);
1684#endif
1685}
1686
1687
1688/**
1689 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1690 *
1691 * This is called internally as well as by PGM when moving GC mappings.
1692 *
1693 * @param pVM The cross context VM structure.
1694 *
1695 * @remarks Caller holds the PGM lock.
1696 */
1697VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1698{
1699 RT_NOREF_PV(pVM);
1700}
1701
1702#ifdef IEM_WITH_CODE_TLB
1703
1704/**
1705 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1706 * failure and longjmp'ing.
1707 *
1708 * We end up here for a number of reasons:
1709 * - pbInstrBuf isn't yet initialized.
1710 * - Advancing beyond the buffer boundary (e.g. cross page).
1711 * - Advancing beyond the CS segment limit.
1712 * - Fetching from non-mappable page (e.g. MMIO).
1713 *
1714 * @param pVCpu The cross context virtual CPU structure of the
1715 * calling thread.
1716 * @param pvDst Where to return the bytes.
1717 * @param cbDst Number of bytes to read.
1718 *
1719 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1720 */
1721IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1722{
1723#ifdef IN_RING3
1724 for (;;)
1725 {
1726 Assert(cbDst <= 8);
1727 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1728
1729 /*
1730 * We might have a partial buffer match, deal with that first to make the
1731 * rest simpler. This is the first part of the cross page/buffer case.
1732 */
1733 if (pVCpu->iem.s.pbInstrBuf != NULL)
1734 {
1735 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1736 {
1737 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1738 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1739 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1740
1741 cbDst -= cbCopy;
1742 pvDst = (uint8_t *)pvDst + cbCopy;
1743 offBuf += cbCopy;
1744 pVCpu->iem.s.offInstrNextByte += cbCopy; /* i.e. it now equals cbInstrBuf */
1745 }
1746 }
1747
1748 /*
1749 * Check segment limit, figuring how much we're allowed to access at this point.
1750 *
1751 * We will fault immediately if RIP is past the segment limit / in non-canonical
1752 * territory. If we do continue, there are one or more bytes to read before we
1753 * end up in trouble and we need to do that first before faulting.
1754 */
1755 RTGCPTR GCPtrFirst;
1756 uint32_t cbMaxRead;
1757 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1758 {
1759 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1760 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1761 { /* likely */ }
1762 else
1763 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1764 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1765 }
1766 else
1767 {
1768 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1769 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1770 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1771 { /* likely */ }
1772 else
1773 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1774 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1775 if (cbMaxRead != 0)
1776 { /* likely */ }
1777 else
1778 {
1779 /* Overflowed because address is 0 and limit is max. */
1780 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1781 cbMaxRead = X86_PAGE_SIZE;
1782 }
1783 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1784 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1785 if (cbMaxRead2 < cbMaxRead)
1786 cbMaxRead = cbMaxRead2;
1787 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1788 }
1789
1790 /*
1791 * Get the TLB entry for this piece of code.
1792 */
1793 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1794 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1795 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1796 if (pTlbe->uTag == uTag)
1797 {
1798 /* likely when executing lots of code, otherwise unlikely */
1799# ifdef VBOX_WITH_STATISTICS
1800 pVCpu->iem.s.CodeTlb.cTlbHits++;
1801# endif
1802 }
1803 else
1804 {
1805 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1806# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1807 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1808 {
1809 pTlbe->uTag = uTag;
1810 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1811 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1812 pTlbe->GCPhys = NIL_RTGCPHYS;
1813 pTlbe->pbMappingR3 = NULL;
1814 }
1815 else
1816# endif
1817 {
1818 RTGCPHYS GCPhys;
1819 uint64_t fFlags;
1820 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1821 if (RT_FAILURE(rc))
1822 {
1823 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1824 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1825 }
1826
1827 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1828 pTlbe->uTag = uTag;
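            /* The US/RW/D page-table bits are stored inverted here, so a clear PTE
               bit sets the corresponding IEMTLBE_F_PT_NO_* flag (the flag values
               line up with the PTE bits), while the NX bit is shifted down to
               bit 0, which the AssertCompile above ties to IEMTLBE_F_PT_NO_EXEC. */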
1829 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1830 pTlbe->GCPhys = GCPhys;
1831 pTlbe->pbMappingR3 = NULL;
1832 }
1833 }
1834
1835 /*
1836 * Check TLB page table level access flags.
1837 */
1838 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1839 {
1840 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1841 {
1842 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1843 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1844 }
1845 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1846 {
1847 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1848 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1849 }
1850 }
1851
1852# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1853 /*
1854 * Allow interpretation of patch manager code blocks since they can for
1855 * instance throw #PFs for perfectly good reasons.
1856 */
1857 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1858 { /* likely */ }
1859 else
1860 {
1861 /** @todo Could optimize this a little in ring-3 if we liked. */
1862 size_t cbRead = 0;
1863 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1864 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1865 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1866 return;
1867 }
1868# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1869
1870 /*
1871 * Look up the physical page info if necessary.
1872 */
1873 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1874 { /* not necessary */ }
1875 else
1876 {
1877 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1878 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1879 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1880 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1881 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1882 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1883 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1884 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1885 }
1886
1887# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1888 /*
1889 * Try do a direct read using the pbMappingR3 pointer.
1890 */
1891 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1892 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1893 {
1894 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1895 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1896 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1897 {
1898 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1899 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1900 }
1901 else
1902 {
1903 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1904 Assert(cbInstr < cbMaxRead);
1905 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1906 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1907 }
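            /* Either way, cbInstrBuf is capped so that at most 15 bytes (the
               architectural maximum instruction length) past the start of the
               current instruction are exposed to the opcode fetchers. */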
1908 if (cbDst <= cbMaxRead)
1909 {
1910 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1911 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1912 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1913 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1914 return;
1915 }
1916 pVCpu->iem.s.pbInstrBuf = NULL;
1917
1918 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1919 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1920 }
1921 else
1922# endif
1923#if 0
1924 /*
1925 * If there is no special read handling, we can read a bit more and
1926 * put it in the prefetch buffer.
1927 */
1928 if ( cbDst < cbMaxRead
1929 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1930 {
1931 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1932 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1933 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1934 { /* likely */ }
1935 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1936 {
1937 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1938 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1939 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1940 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1941 }
1942 else
1943 {
1944 Log((RT_SUCCESS(rcStrict)
1945 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1946 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1947 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1948 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1949 }
1950 }
1951 /*
1952 * Special read handling, so only read exactly what's needed.
1953 * This is a highly unlikely scenario.
1954 */
1955 else
1956#endif
1957 {
1958 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1959 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1960 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1961 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1962 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1963 { /* likely */ }
1964 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1965 {
1966 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1967 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1968 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1969 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1970 }
1971 else
1972 {
1973 Log((RT_SUCCESS(rcStrict)
1974 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1975 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1976 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1977 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1978 }
1979 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1980 if (cbToRead == cbDst)
1981 return;
1982 }
1983
1984 /*
1985 * More to read, loop.
1986 */
1987 cbDst -= cbMaxRead;
1988 pvDst = (uint8_t *)pvDst + cbMaxRead;
1989 }
1990#else
1991 RT_NOREF(pvDst, cbDst);
1992 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1993#endif
1994}
1995
1996#else
1997
1998/**
1999 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2000 * exception if it fails.
2001 *
2002 * @returns Strict VBox status code.
2003 * @param pVCpu The cross context virtual CPU structure of the
2004 * calling thread.
2005 * @param cbMin The minimum number of bytes relative to offOpcode
2006 * that must be read.
2007 */
2008IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2009{
2010 /*
2011 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2012 *
2013 * First translate CS:rIP to a physical address.
2014 */
2015 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2016 uint32_t cbToTryRead;
2017 RTGCPTR GCPtrNext;
2018 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2019 {
2020 cbToTryRead = PAGE_SIZE;
2021 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2022 if (!IEM_IS_CANONICAL(GCPtrNext))
2023 return iemRaiseGeneralProtectionFault0(pVCpu);
2024 }
2025 else
2026 {
2027 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2028 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2029 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2030 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2031 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2032 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2033 if (!cbToTryRead) /* overflowed */
2034 {
2035 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2036 cbToTryRead = UINT32_MAX;
2037 /** @todo check out wrapping around the code segment. */
2038 }
2039 if (cbToTryRead < cbMin - cbLeft)
2040 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2041 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2042 }
2043
2044 /* Only read up to the end of the page, and make sure we don't read more
2045 than the opcode buffer can hold. */
2046 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2047 if (cbToTryRead > cbLeftOnPage)
2048 cbToTryRead = cbLeftOnPage;
2049 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2050 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2051/** @todo r=bird: Convert assertion into undefined opcode exception? */
2052 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2053
2054# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2055 /* Allow interpretation of patch manager code blocks since they can for
2056 instance throw #PFs for perfectly good reasons. */
2057 if (pVCpu->iem.s.fInPatchCode)
2058 {
2059 size_t cbRead = 0;
2060 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2061 AssertRCReturn(rc, rc);
2062 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2063 return VINF_SUCCESS;
2064 }
2065# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2066
2067 RTGCPHYS GCPhys;
2068 uint64_t fFlags;
2069 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2070 if (RT_FAILURE(rc))
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2073 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2074 }
2075 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2076 {
2077 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2078 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2079 }
2080 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2081 {
2082 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2083 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2084 }
2085 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2086 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2087 /** @todo Check reserved bits and such stuff. PGM is better at doing
2088 * that, so do it when implementing the guest virtual address
2089 * TLB... */
2090
2091 /*
2092 * Read the bytes at this address.
2093 *
2094 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2095 * and since PATM should only patch the start of an instruction there
2096 * should be no need to check again here.
2097 */
2098 if (!pVCpu->iem.s.fBypassHandlers)
2099 {
2100 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2101 cbToTryRead, PGMACCESSORIGIN_IEM);
2102 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2103 { /* likely */ }
2104 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2105 {
2106 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2107 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2108 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2109 }
2110 else
2111 {
2112 Log((RT_SUCCESS(rcStrict)
2113 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2114 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2115 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2116 return rcStrict;
2117 }
2118 }
2119 else
2120 {
2121 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2122 if (RT_SUCCESS(rc))
2123 { /* likely */ }
2124 else
2125 {
2126 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2127 return rc;
2128 }
2129 }
2130 pVCpu->iem.s.cbOpcode += cbToTryRead;
2131 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2132
2133 return VINF_SUCCESS;
2134}
2135
2136#endif /* !IEM_WITH_CODE_TLB */
2137#ifndef IEM_WITH_SETJMP
2138
2139/**
2140 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2141 *
2142 * @returns Strict VBox status code.
2143 * @param pVCpu The cross context virtual CPU structure of the
2144 * calling thread.
2145 * @param pb Where to return the opcode byte.
2146 */
2147DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2148{
2149 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2150 if (rcStrict == VINF_SUCCESS)
2151 {
2152 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2153 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2154 pVCpu->iem.s.offOpcode = offOpcode + 1;
2155 }
2156 else
2157 *pb = 0;
2158 return rcStrict;
2159}
2160
2161
2162/**
2163 * Fetches the next opcode byte.
2164 *
2165 * @returns Strict VBox status code.
2166 * @param pVCpu The cross context virtual CPU structure of the
2167 * calling thread.
2168 * @param pu8 Where to return the opcode byte.
2169 */
2170DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2171{
2172 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2173 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2174 {
2175 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2176 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2177 return VINF_SUCCESS;
2178 }
2179 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2180}
2181
2182#else /* IEM_WITH_SETJMP */
2183
2184/**
2185 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2186 *
2187 * @returns The opcode byte.
2188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2189 */
2190DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2191{
2192# ifdef IEM_WITH_CODE_TLB
2193 uint8_t u8;
2194 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2195 return u8;
2196# else
2197 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2198 if (rcStrict == VINF_SUCCESS)
2199 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2200 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2201# endif
2202}
2203
2204
2205/**
2206 * Fetches the next opcode byte, longjmp on error.
2207 *
2208 * @returns The opcode byte.
2209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2210 */
2211DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2212{
2213# ifdef IEM_WITH_CODE_TLB
2214 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2215 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2216 if (RT_LIKELY( pbBuf != NULL
2217 && offBuf < pVCpu->iem.s.cbInstrBuf))
2218 {
2219 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2220 return pbBuf[offBuf];
2221 }
2222# else
2223 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2224 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2225 {
2226 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2227 return pVCpu->iem.s.abOpcode[offOpcode];
2228 }
2229# endif
2230 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2231}
2232
2233#endif /* IEM_WITH_SETJMP */
2234
2235/**
2236 * Fetches the next opcode byte, returns automatically on failure.
2237 *
2238 * @param a_pu8 Where to return the opcode byte.
2239 * @remark Implicitly references pVCpu.
2240 */
2241#ifndef IEM_WITH_SETJMP
2242# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2243 do \
2244 { \
2245 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2246 if (rcStrict2 == VINF_SUCCESS) \
2247 { /* likely */ } \
2248 else \
2249 return rcStrict2; \
2250 } while (0)
2251#else
2252# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2253#endif /* IEM_WITH_SETJMP */
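/* Illustrative usage sketch (not taken from this file; u8Imm is a hypothetical
   local): a decoder function simply does
       uint8_t u8Imm;
       IEM_OPCODE_GET_NEXT_U8(&u8Imm);
   and the macro either advances past the byte or returns/longjmps with the
   strict status code on failure, depending on IEM_WITH_SETJMP. */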
2254
2255
2256#ifndef IEM_WITH_SETJMP
2257/**
2258 * Fetches the next signed byte from the opcode stream.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pi8 Where to return the signed byte.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2265{
2266 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2267}
2268#endif /* !IEM_WITH_SETJMP */
2269
2270
2271/**
2272 * Fetches the next signed byte from the opcode stream, returning automatically
2273 * on failure.
2274 *
2275 * @param a_pi8 Where to return the signed byte.
2276 * @remark Implicitly references pVCpu.
2277 */
2278#ifndef IEM_WITH_SETJMP
2279# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2280 do \
2281 { \
2282 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2283 if (rcStrict2 != VINF_SUCCESS) \
2284 return rcStrict2; \
2285 } while (0)
2286#else /* IEM_WITH_SETJMP */
2287# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2288
2289#endif /* IEM_WITH_SETJMP */
2290
2291#ifndef IEM_WITH_SETJMP
2292
2293/**
2294 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2295 *
2296 * @returns Strict VBox status code.
2297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2298 * @param pu16 Where to return the opcode word.
2299 */
2300DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2301{
2302 uint8_t u8;
2303 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2304 if (rcStrict == VINF_SUCCESS)
2305 *pu16 = (int8_t)u8;
2306 return rcStrict;
2307}
2308
2309
2310/**
2311 * Fetches the next signed byte from the opcode stream, extending it to
2312 * unsigned 16-bit.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu16 Where to return the unsigned word.
2317 */
2318DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2319{
2320 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2321 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2322 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2323
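    /* Casting the byte to int8_t first and letting the implicit widening to the
       unsigned 16-bit destination do the rest is what performs the sign extension. */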
2324 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2325 pVCpu->iem.s.offOpcode = offOpcode + 1;
2326 return VINF_SUCCESS;
2327}
2328
2329#endif /* !IEM_WITH_SETJMP */
2330
2331/**
2332 * Fetches the next signed byte from the opcode stream and sign-extends it to
2333 * a word, returning automatically on failure.
2334 *
2335 * @param a_pu16 Where to return the word.
2336 * @remark Implicitly references pVCpu.
2337 */
2338#ifndef IEM_WITH_SETJMP
2339# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2340 do \
2341 { \
2342 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2343 if (rcStrict2 != VINF_SUCCESS) \
2344 return rcStrict2; \
2345 } while (0)
2346#else
2347# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2348#endif
2349
2350#ifndef IEM_WITH_SETJMP
2351
2352/**
2353 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2354 *
2355 * @returns Strict VBox status code.
2356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2357 * @param pu32 Where to return the opcode dword.
2358 */
2359DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2360{
2361 uint8_t u8;
2362 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2363 if (rcStrict == VINF_SUCCESS)
2364 *pu32 = (int8_t)u8;
2365 return rcStrict;
2366}
2367
2368
2369/**
2370 * Fetches the next signed byte from the opcode stream, extending it to
2371 * unsigned 32-bit.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu32 Where to return the unsigned dword.
2376 */
2377DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2378{
2379 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2380 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2381 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2382
2383 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2384 pVCpu->iem.s.offOpcode = offOpcode + 1;
2385 return VINF_SUCCESS;
2386}
2387
2388#endif /* !IEM_WITH_SETJMP */
2389
2390/**
2391 * Fetches the next signed byte from the opcode stream and sign-extends it to
2392 * a double word, returning automatically on failure.
2393 *
2394 * @param a_pu32 Where to return the double word.
2395 * @remark Implicitly references pVCpu.
2396 */
2397#ifndef IEM_WITH_SETJMP
2398# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2399 do \
2400 { \
2401 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2402 if (rcStrict2 != VINF_SUCCESS) \
2403 return rcStrict2; \
2404 } while (0)
2405#else
2406# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2407#endif
2408
2409#ifndef IEM_WITH_SETJMP
2410
2411/**
2412 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2413 *
2414 * @returns Strict VBox status code.
2415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2416 * @param pu64 Where to return the opcode qword.
2417 */
2418DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2419{
2420 uint8_t u8;
2421 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2422 if (rcStrict == VINF_SUCCESS)
2423 *pu64 = (int8_t)u8;
2424 return rcStrict;
2425}
2426
2427
2428/**
2429 * Fetches the next signed byte from the opcode stream, extending it to
2430 * unsigned 64-bit.
2431 *
2432 * @returns Strict VBox status code.
2433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2434 * @param pu64 Where to return the unsigned qword.
2435 */
2436DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2437{
2438 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2439 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2440 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2441
2442 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2443 pVCpu->iem.s.offOpcode = offOpcode + 1;
2444 return VINF_SUCCESS;
2445}
2446
2447#endif /* !IEM_WITH_SETJMP */
2448
2449
2450/**
2451 * Fetches the next signed byte from the opcode stream and sign-extends it to
2452 * a quad word, returning automatically on failure.
2453 *
2454 * @param a_pu64 Where to return the quad word.
2455 * @remark Implicitly references pVCpu.
2456 */
2457#ifndef IEM_WITH_SETJMP
2458# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2459 do \
2460 { \
2461 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2462 if (rcStrict2 != VINF_SUCCESS) \
2463 return rcStrict2; \
2464 } while (0)
2465#else
2466# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2467#endif
2468
2469
2470#ifndef IEM_WITH_SETJMP
2471/**
2472 * Fetches the next opcode byte (a ModR/M byte), noting down its position.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pVCpu The cross context virtual CPU structure of the
2476 * calling thread.
2477 * @param pu8 Where to return the opcode byte.
2478 */
2479DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2480{
2481 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2482 pVCpu->iem.s.offModRm = offOpcode;
2483 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2484 {
2485 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2486 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2487 return VINF_SUCCESS;
2488 }
2489 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2490}
2491#else /* IEM_WITH_SETJMP */
2492/**
2493 * Fetches the next opcode byte (a ModR/M byte), noting down its position, longjmp on error.
2494 *
2495 * @returns The opcode byte.
2496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2497 */
2498DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2499{
2500# ifdef IEM_WITH_CODE_TLB
2501 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2502 pVCpu->iem.s.offModRm = offBuf;
2503 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2504 if (RT_LIKELY( pbBuf != NULL
2505 && offBuf < pVCpu->iem.s.cbInstrBuf))
2506 {
2507 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2508 return pbBuf[offBuf];
2509 }
2510# else
2511 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2512 pVCpu->iem.s.offModRm = offOpcode;
2513 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2514 {
2515 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2516 return pVCpu->iem.s.abOpcode[offOpcode];
2517 }
2518# endif
2519 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2520}
2521#endif /* IEM_WITH_SETJMP */
2522
2523/**
2524 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2525 * on failure.
2526 *
2527 * Will note down the position of the ModR/M byte for VT-x exits.
2528 *
2529 * @param a_pbRm Where to return the RM opcode byte.
2530 * @remark Implicitly references pVCpu.
2531 */
2532#ifndef IEM_WITH_SETJMP
2533# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2534 do \
2535 { \
2536 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2537 if (rcStrict2 == VINF_SUCCESS) \
2538 { /* likely */ } \
2539 else \
2540 return rcStrict2; \
2541 } while (0)
2542#else
2543# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2544#endif /* IEM_WITH_SETJMP */
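/* Illustrative usage sketch (not taken from this file; bRm is a hypothetical
   local): instruction decoders fetch the ModR/M byte through
       uint8_t bRm;
       IEM_OPCODE_GET_NEXT_RM(&bRm);
   so that offModRm records its position for later use by VT-x exit handling. */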
2545
2546
2547#ifndef IEM_WITH_SETJMP
2548
2549/**
2550 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2551 *
2552 * @returns Strict VBox status code.
2553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2554 * @param pu16 Where to return the opcode word.
2555 */
2556DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2557{
2558 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2559 if (rcStrict == VINF_SUCCESS)
2560 {
2561 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
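        /* Hosts that tolerate unaligned loads read the word straight out of the
           opcode buffer; otherwise it is assembled byte by byte below, which also
           makes the little-endian byte order explicit. */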
2562# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2563 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2564# else
2565 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2566# endif
2567 pVCpu->iem.s.offOpcode = offOpcode + 2;
2568 }
2569 else
2570 *pu16 = 0;
2571 return rcStrict;
2572}
2573
2574
2575/**
2576 * Fetches the next opcode word.
2577 *
2578 * @returns Strict VBox status code.
2579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2580 * @param pu16 Where to return the opcode word.
2581 */
2582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2583{
2584 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2585 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2586 {
2587 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2588# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2589 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2590# else
2591 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2592# endif
2593 return VINF_SUCCESS;
2594 }
2595 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2596}
2597
2598#else /* IEM_WITH_SETJMP */
2599
2600/**
2601 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2602 *
2603 * @returns The opcode word.
2604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2605 */
2606DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2607{
2608# ifdef IEM_WITH_CODE_TLB
2609 uint16_t u16;
2610 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2611 return u16;
2612# else
2613 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2614 if (rcStrict == VINF_SUCCESS)
2615 {
2616 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2617 pVCpu->iem.s.offOpcode += 2;
2618# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2619 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2620# else
2621 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2622# endif
2623 }
2624 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2625# endif
2626}
2627
2628
2629/**
2630 * Fetches the next opcode word, longjmp on error.
2631 *
2632 * @returns The opcode word.
2633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2634 */
2635DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2636{
2637# ifdef IEM_WITH_CODE_TLB
2638 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2639 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2640 if (RT_LIKELY( pbBuf != NULL
2641 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2642 {
2643 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2644# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2645 return *(uint16_t const *)&pbBuf[offBuf];
2646# else
2647 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2648# endif
2649 }
2650# else
2651 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2652 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2653 {
2654 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2655# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2656 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2657# else
2658 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2659# endif
2660 }
2661# endif
2662 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2663}
2664
2665#endif /* IEM_WITH_SETJMP */
2666
2667
2668/**
2669 * Fetches the next opcode word, returns automatically on failure.
2670 *
2671 * @param a_pu16 Where to return the opcode word.
2672 * @remark Implicitly references pVCpu.
2673 */
2674#ifndef IEM_WITH_SETJMP
2675# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2676 do \
2677 { \
2678 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2679 if (rcStrict2 != VINF_SUCCESS) \
2680 return rcStrict2; \
2681 } while (0)
2682#else
2683# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2684#endif
2685
2686#ifndef IEM_WITH_SETJMP
2687
2688/**
2689 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2690 *
2691 * @returns Strict VBox status code.
2692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2693 * @param pu32 Where to return the opcode double word.
2694 */
2695DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2696{
2697 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2698 if (rcStrict == VINF_SUCCESS)
2699 {
2700 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2701 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2702 pVCpu->iem.s.offOpcode = offOpcode + 2;
2703 }
2704 else
2705 *pu32 = 0;
2706 return rcStrict;
2707}
2708
2709
2710/**
2711 * Fetches the next opcode word, zero extending it to a double word.
2712 *
2713 * @returns Strict VBox status code.
2714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2715 * @param pu32 Where to return the opcode double word.
2716 */
2717DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2718{
2719 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2720 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2721 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2722
2723 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2724 pVCpu->iem.s.offOpcode = offOpcode + 2;
2725 return VINF_SUCCESS;
2726}
2727
2728#endif /* !IEM_WITH_SETJMP */
2729
2730
2731/**
2732 * Fetches the next opcode word and zero extends it to a double word, returns
2733 * automatically on failure.
2734 *
2735 * @param a_pu32 Where to return the opcode double word.
2736 * @remark Implicitly references pVCpu.
2737 */
2738#ifndef IEM_WITH_SETJMP
2739# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2740 do \
2741 { \
2742 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2743 if (rcStrict2 != VINF_SUCCESS) \
2744 return rcStrict2; \
2745 } while (0)
2746#else
2747# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2748#endif
2749
2750#ifndef IEM_WITH_SETJMP
2751
2752/**
2753 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2754 *
2755 * @returns Strict VBox status code.
2756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2757 * @param pu64 Where to return the opcode quad word.
2758 */
2759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2760{
2761 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2762 if (rcStrict == VINF_SUCCESS)
2763 {
2764 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2765 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2766 pVCpu->iem.s.offOpcode = offOpcode + 2;
2767 }
2768 else
2769 *pu64 = 0;
2770 return rcStrict;
2771}
2772
2773
2774/**
2775 * Fetches the next opcode word, zero extending it to a quad word.
2776 *
2777 * @returns Strict VBox status code.
2778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2779 * @param pu64 Where to return the opcode quad word.
2780 */
2781DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2782{
2783 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2784 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2785 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2786
2787 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2788 pVCpu->iem.s.offOpcode = offOpcode + 2;
2789 return VINF_SUCCESS;
2790}
2791
2792#endif /* !IEM_WITH_SETJMP */
2793
2794/**
2795 * Fetches the next opcode word and zero extends it to a quad word, returns
2796 * automatically on failure.
2797 *
2798 * @param a_pu64 Where to return the opcode quad word.
2799 * @remark Implicitly references pVCpu.
2800 */
2801#ifndef IEM_WITH_SETJMP
2802# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2803 do \
2804 { \
2805 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2806 if (rcStrict2 != VINF_SUCCESS) \
2807 return rcStrict2; \
2808 } while (0)
2809#else
2810# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2811#endif
2812
2813
2814#ifndef IEM_WITH_SETJMP
2815/**
2816 * Fetches the next signed word from the opcode stream.
2817 *
2818 * @returns Strict VBox status code.
2819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2820 * @param pi16 Where to return the signed word.
2821 */
2822DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2823{
2824 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2825}
2826#endif /* !IEM_WITH_SETJMP */
2827
2828
2829/**
2830 * Fetches the next signed word from the opcode stream, returning automatically
2831 * on failure.
2832 *
2833 * @param a_pi16 Where to return the signed word.
2834 * @remark Implicitly references pVCpu.
2835 */
2836#ifndef IEM_WITH_SETJMP
2837# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2838 do \
2839 { \
2840 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2841 if (rcStrict2 != VINF_SUCCESS) \
2842 return rcStrict2; \
2843 } while (0)
2844#else
2845# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2846#endif
2847
2848#ifndef IEM_WITH_SETJMP
2849
2850/**
2851 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2852 *
2853 * @returns Strict VBox status code.
2854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2855 * @param pu32 Where to return the opcode dword.
2856 */
2857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2858{
2859 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2860 if (rcStrict == VINF_SUCCESS)
2861 {
2862 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2863# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2864 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2865# else
2866 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2867 pVCpu->iem.s.abOpcode[offOpcode + 1],
2868 pVCpu->iem.s.abOpcode[offOpcode + 2],
2869 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2870# endif
2871 pVCpu->iem.s.offOpcode = offOpcode + 4;
2872 }
2873 else
2874 *pu32 = 0;
2875 return rcStrict;
2876}
2877
2878
2879/**
2880 * Fetches the next opcode dword.
2881 *
2882 * @returns Strict VBox status code.
2883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2884 * @param pu32 Where to return the opcode double word.
2885 */
2886DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2887{
2888 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2889 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2890 {
2891 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2892# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2893 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2894# else
2895 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2896 pVCpu->iem.s.abOpcode[offOpcode + 1],
2897 pVCpu->iem.s.abOpcode[offOpcode + 2],
2898 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2899# endif
2900 return VINF_SUCCESS;
2901 }
2902 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2903}
2904
2905#else /* IEM_WITH_SETJMP */
2906
2907/**
2908 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2909 *
2910 * @returns The opcode dword.
2911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2912 */
2913DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2914{
2915# ifdef IEM_WITH_CODE_TLB
2916 uint32_t u32;
2917 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2918 return u32;
2919# else
2920 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2921 if (rcStrict == VINF_SUCCESS)
2922 {
2923 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2924 pVCpu->iem.s.offOpcode = offOpcode + 4;
2925# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2926 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2927# else
2928 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2929 pVCpu->iem.s.abOpcode[offOpcode + 1],
2930 pVCpu->iem.s.abOpcode[offOpcode + 2],
2931 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2932# endif
2933 }
2934 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2935# endif
2936}
2937
2938
2939/**
2940 * Fetches the next opcode dword, longjmp on error.
2941 *
2942 * @returns The opcode dword.
2943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2944 */
2945DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2946{
2947# ifdef IEM_WITH_CODE_TLB
2948 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2949 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2950 if (RT_LIKELY( pbBuf != NULL
2951 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2952 {
2953 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 return *(uint32_t const *)&pbBuf[offBuf];
2956# else
2957 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2958 pbBuf[offBuf + 1],
2959 pbBuf[offBuf + 2],
2960 pbBuf[offBuf + 3]);
2961# endif
2962 }
2963# else
2964 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2965 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2966 {
2967 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2968# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2969 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2970# else
2971 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2972 pVCpu->iem.s.abOpcode[offOpcode + 1],
2973 pVCpu->iem.s.abOpcode[offOpcode + 2],
2974 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2975# endif
2976 }
2977# endif
2978 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2979}
2980
2981#endif /* !IEM_WITH_SETJMP */
2982
2983
2984/**
2985 * Fetches the next opcode dword, returns automatically on failure.
2986 *
2987 * @param a_pu32 Where to return the opcode dword.
2988 * @remark Implicitly references pVCpu.
2989 */
2990#ifndef IEM_WITH_SETJMP
2991# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2992 do \
2993 { \
2994 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2995 if (rcStrict2 != VINF_SUCCESS) \
2996 return rcStrict2; \
2997 } while (0)
2998#else
2999# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3000#endif
3001
3002#ifndef IEM_WITH_SETJMP
3003
3004/**
3005 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3006 *
3007 * @returns Strict VBox status code.
3008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3009 * @param pu64 Where to return the opcode quad word.
3010 */
3011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3012{
3013 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3017 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3018 pVCpu->iem.s.abOpcode[offOpcode + 1],
3019 pVCpu->iem.s.abOpcode[offOpcode + 2],
3020 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3021 pVCpu->iem.s.offOpcode = offOpcode + 4;
3022 }
3023 else
3024 *pu64 = 0;
3025 return rcStrict;
3026}
3027
3028
3029/**
3030 * Fetches the next opcode dword, zero extending it to a quad word.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3034 * @param pu64 Where to return the opcode quad word.
3035 */
3036DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3037{
3038 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3039 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3040 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3041
3042 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3043 pVCpu->iem.s.abOpcode[offOpcode + 1],
3044 pVCpu->iem.s.abOpcode[offOpcode + 2],
3045 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3046 pVCpu->iem.s.offOpcode = offOpcode + 4;
3047 return VINF_SUCCESS;
3048}
3049
3050#endif /* !IEM_WITH_SETJMP */
3051
3052
3053/**
3054 * Fetches the next opcode dword and zero extends it to a quad word, returns
3055 * automatically on failure.
3056 *
3057 * @param a_pu64 Where to return the opcode quad word.
3058 * @remark Implicitly references pVCpu.
3059 */
3060#ifndef IEM_WITH_SETJMP
3061# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3062 do \
3063 { \
3064 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3065 if (rcStrict2 != VINF_SUCCESS) \
3066 return rcStrict2; \
3067 } while (0)
3068#else
3069# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3070#endif
3071
3072
3073#ifndef IEM_WITH_SETJMP
3074/**
3075 * Fetches the next signed double word from the opcode stream.
3076 *
3077 * @returns Strict VBox status code.
3078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3079 * @param pi32 Where to return the signed double word.
3080 */
3081DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3082{
3083 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3084}
3085#endif
3086
3087/**
3088 * Fetches the next signed double word from the opcode stream, returning
3089 * automatically on failure.
3090 *
3091 * @param a_pi32 Where to return the signed double word.
3092 * @remark Implicitly references pVCpu.
3093 */
3094#ifndef IEM_WITH_SETJMP
3095# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3096 do \
3097 { \
3098 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3099 if (rcStrict2 != VINF_SUCCESS) \
3100 return rcStrict2; \
3101 } while (0)
3102#else
3103# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3104#endif
3105
3106#ifndef IEM_WITH_SETJMP
3107
3108/**
3109 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3110 *
3111 * @returns Strict VBox status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param pu64 Where to return the opcode qword.
3114 */
3115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3116{
3117 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3118 if (rcStrict == VINF_SUCCESS)
3119 {
3120 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3121 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3122 pVCpu->iem.s.abOpcode[offOpcode + 1],
3123 pVCpu->iem.s.abOpcode[offOpcode + 2],
3124 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3125 pVCpu->iem.s.offOpcode = offOpcode + 4;
3126 }
3127 else
3128 *pu64 = 0;
3129 return rcStrict;
3130}
3131
3132
3133/**
3134 * Fetches the next opcode dword, sign extending it into a quad word.
3135 *
3136 * @returns Strict VBox status code.
3137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3138 * @param pu64 Where to return the opcode quad word.
3139 */
3140DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3141{
3142 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3143 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3144 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3145
3146 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3147 pVCpu->iem.s.abOpcode[offOpcode + 1],
3148 pVCpu->iem.s.abOpcode[offOpcode + 2],
3149 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3150 *pu64 = i32;
3151 pVCpu->iem.s.offOpcode = offOpcode + 4;
3152 return VINF_SUCCESS;
3153}
3154
3155#endif /* !IEM_WITH_SETJMP */
3156
3157
3158/**
3159 * Fetches the next opcode double word and sign extends it to a quad word,
3160 * returns automatically on failure.
3161 *
3162 * @param a_pu64 Where to return the opcode quad word.
3163 * @remark Implicitly references pVCpu.
3164 */
3165#ifndef IEM_WITH_SETJMP
3166# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3167 do \
3168 { \
3169 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3170 if (rcStrict2 != VINF_SUCCESS) \
3171 return rcStrict2; \
3172 } while (0)
3173#else
3174# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3175#endif
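/* Worked example (illustrative): the little-endian opcode bytes F0 FF FF FF form the
 * dword 0xFFFFFFF0 (-16), which IEM_OPCODE_GET_NEXT_S32_SX_U64 widens to the qword
 * 0xFFFFFFFFFFFFFFF0, matching the usual treatment of 32-bit displacements and
 * immediates that are sign extended in 64-bit mode. */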
3176
3177#ifndef IEM_WITH_SETJMP
3178
3179/**
3180 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3181 *
3182 * @returns Strict VBox status code.
3183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3184 * @param pu64 Where to return the opcode qword.
3185 */
3186DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3187{
3188 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3189 if (rcStrict == VINF_SUCCESS)
3190 {
3191 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3192# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3193 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3194# else
3195 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3196 pVCpu->iem.s.abOpcode[offOpcode + 1],
3197 pVCpu->iem.s.abOpcode[offOpcode + 2],
3198 pVCpu->iem.s.abOpcode[offOpcode + 3],
3199 pVCpu->iem.s.abOpcode[offOpcode + 4],
3200 pVCpu->iem.s.abOpcode[offOpcode + 5],
3201 pVCpu->iem.s.abOpcode[offOpcode + 6],
3202 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3203# endif
3204 pVCpu->iem.s.offOpcode = offOpcode + 8;
3205 }
3206 else
3207 *pu64 = 0;
3208 return rcStrict;
3209}
3210
3211
3212/**
3213 * Fetches the next opcode qword.
3214 *
3215 * @returns Strict VBox status code.
3216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3217 * @param pu64 Where to return the opcode qword.
3218 */
3219DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3220{
3221 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3222 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3223 {
3224# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3225 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3226# else
3227 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3228 pVCpu->iem.s.abOpcode[offOpcode + 1],
3229 pVCpu->iem.s.abOpcode[offOpcode + 2],
3230 pVCpu->iem.s.abOpcode[offOpcode + 3],
3231 pVCpu->iem.s.abOpcode[offOpcode + 4],
3232 pVCpu->iem.s.abOpcode[offOpcode + 5],
3233 pVCpu->iem.s.abOpcode[offOpcode + 6],
3234 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3235# endif
3236 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3237 return VINF_SUCCESS;
3238 }
3239 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3240}
3241
3242#else /* IEM_WITH_SETJMP */
3243
3244/**
3245 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3246 *
3247 * @returns The opcode qword.
3248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3249 */
3250DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3251{
3252# ifdef IEM_WITH_CODE_TLB
3253 uint64_t u64;
3254 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3255 return u64;
3256# else
3257 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3258 if (rcStrict == VINF_SUCCESS)
3259 {
3260 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3261 pVCpu->iem.s.offOpcode = offOpcode + 8;
3262# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3263 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3264# else
3265 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3266 pVCpu->iem.s.abOpcode[offOpcode + 1],
3267 pVCpu->iem.s.abOpcode[offOpcode + 2],
3268 pVCpu->iem.s.abOpcode[offOpcode + 3],
3269 pVCpu->iem.s.abOpcode[offOpcode + 4],
3270 pVCpu->iem.s.abOpcode[offOpcode + 5],
3271 pVCpu->iem.s.abOpcode[offOpcode + 6],
3272 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3273# endif
3274 }
3275 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3276# endif
3277}
3278
3279
3280/**
3281 * Fetches the next opcode qword, longjmp on error.
3282 *
3283 * @returns The opcode qword.
3284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3285 */
3286DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3287{
3288# ifdef IEM_WITH_CODE_TLB
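    /* Fast path: read straight from the linear instruction buffer maintained by the
       code TLB; if the buffer isn't mapped or fewer than 8 bytes remain, fall back
       to the slow fetcher at the bottom of the function. */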
3289 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3290 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3291 if (RT_LIKELY( pbBuf != NULL
3292 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3293 {
3294 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3295# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3296 return *(uint64_t const *)&pbBuf[offBuf];
3297# else
3298 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3299 pbBuf[offBuf + 1],
3300 pbBuf[offBuf + 2],
3301 pbBuf[offBuf + 3],
3302 pbBuf[offBuf + 4],
3303 pbBuf[offBuf + 5],
3304 pbBuf[offBuf + 6],
3305 pbBuf[offBuf + 7]);
3306# endif
3307 }
3308# else
3309 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3310 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3311 {
3312 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3313# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3314 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3315# else
3316 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3317 pVCpu->iem.s.abOpcode[offOpcode + 1],
3318 pVCpu->iem.s.abOpcode[offOpcode + 2],
3319 pVCpu->iem.s.abOpcode[offOpcode + 3],
3320 pVCpu->iem.s.abOpcode[offOpcode + 4],
3321 pVCpu->iem.s.abOpcode[offOpcode + 5],
3322 pVCpu->iem.s.abOpcode[offOpcode + 6],
3323 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3324# endif
3325 }
3326# endif
3327 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3328}
3329
3330#endif /* IEM_WITH_SETJMP */
3331
3332/**
3333 * Fetches the next opcode quad word, returns automatically on failure.
3334 *
3335 * @param a_pu64 Where to return the opcode quad word.
3336 * @remark Implicitly references pVCpu.
3337 */
3338#ifndef IEM_WITH_SETJMP
3339# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3340 do \
3341 { \
3342 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3343 if (rcStrict2 != VINF_SUCCESS) \
3344 return rcStrict2; \
3345 } while (0)
3346#else
3347# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3348#endif
3349
3350
3351/** @name Misc Worker Functions.
3352 * @{
3353 */
3354
3355/**
3356 * Gets the exception class for the specified exception vector.
3357 *
3358 * @returns The class of the specified exception.
3359 * @param uVector The exception vector.
3360 */
3361IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3362{
3363 Assert(uVector <= X86_XCPT_LAST);
3364 switch (uVector)
3365 {
3366 case X86_XCPT_DE:
3367 case X86_XCPT_TS:
3368 case X86_XCPT_NP:
3369 case X86_XCPT_SS:
3370 case X86_XCPT_GP:
3371 case X86_XCPT_SX: /* AMD only */
3372 return IEMXCPTCLASS_CONTRIBUTORY;
3373
3374 case X86_XCPT_PF:
3375 case X86_XCPT_VE: /* Intel only */
3376 return IEMXCPTCLASS_PAGE_FAULT;
3377
3378 case X86_XCPT_DF:
3379 return IEMXCPTCLASS_DOUBLE_FAULT;
3380 }
3381 return IEMXCPTCLASS_BENIGN;
3382}
3383
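/* Summary of the recursion rules implemented below (cf. the Intel/AMD double fault rules):
 *   - a page fault followed by another page fault or a contributory exception -> #DF
 *   - a contributory exception followed by a contributory exception           -> #DF
 *   - #DF delivery raising a contributory exception or a page fault           -> triple fault
 *   - everything else is delivered as the current exception, with extra
 *     bookkeeping for faults during NMI delivery and the #AC-on-#AC hang case. */
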
3384
3385/**
3386 * Evaluates how to handle an exception caused during delivery of another event
3387 * (exception / interrupt).
3388 *
3389 * @returns How to handle the recursive exception.
3390 * @param pVCpu The cross context virtual CPU structure of the
3391 * calling thread.
3392 * @param fPrevFlags The flags of the previous event.
3393 * @param uPrevVector The vector of the previous event.
3394 * @param fCurFlags The flags of the current exception.
3395 * @param uCurVector The vector of the current exception.
3396 * @param pfXcptRaiseInfo Where to store additional information about the
3397 * exception condition. Optional.
3398 */
3399VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3400 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3401{
3402 /*
3403 * Only CPU exceptions can be raised while delivering other events; software interrupt
3404 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3405 */
3406 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3407 Assert(pVCpu); RT_NOREF(pVCpu);
3408 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3409
3410 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3411 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3412 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3413 {
3414 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3415 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3416 {
3417 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3418 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3419 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3420 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3421 {
3422 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3423 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3424 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3425 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3426 uCurVector, pVCpu->cpum.GstCtx.cr2));
3427 }
3428 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3429 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3430 {
3431 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3432 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3433 }
3434 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3435 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3436 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3437 {
3438 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3439 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3440 }
3441 }
3442 else
3443 {
3444 if (uPrevVector == X86_XCPT_NMI)
3445 {
3446 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3447 if (uCurVector == X86_XCPT_PF)
3448 {
3449 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3450 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3451 }
3452 }
3453 else if ( uPrevVector == X86_XCPT_AC
3454 && uCurVector == X86_XCPT_AC)
3455 {
3456 enmRaise = IEMXCPTRAISE_CPU_HANG;
3457 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3458 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3459 }
3460 }
3461 }
3462 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3463 {
3464 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3465 if (uCurVector == X86_XCPT_PF)
3466 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3467 }
3468 else
3469 {
3470 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3471 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3472 }
3473
3474 if (pfXcptRaiseInfo)
3475 *pfXcptRaiseInfo = fRaiseInfo;
3476 return enmRaise;
3477}
3478
3479
3480/**
3481 * Enters the CPU shutdown state initiated by a triple fault or other
3482 * unrecoverable conditions.
3483 *
3484 * @returns Strict VBox status code.
3485 * @param pVCpu The cross context virtual CPU structure of the
3486 * calling thread.
3487 */
3488IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3489{
3490 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3491 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3492
3493 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3494 {
3495 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3496 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3497 }
3498
3499 RT_NOREF(pVCpu);
3500 return VINF_EM_TRIPLE_FAULT;
3501}
3502
3503
3504/**
3505 * Validates a new SS segment.
3506 *
3507 * @returns VBox strict status code.
3508 * @param pVCpu The cross context virtual CPU structure of the
3509 * calling thread.
3510 * @param NewSS The new SS selector.
3511 * @param uCpl The CPL to load the stack for.
3512 * @param pDesc Where to return the descriptor.
3513 */
3514IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3515{
3516 /* Null selectors are not allowed (we're not called for dispatching
3517 interrupts with SS=0 in long mode). */
3518 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3519 {
3520 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3521 return iemRaiseTaskSwitchFault0(pVCpu);
3522 }
3523
3524 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3525 if ((NewSS & X86_SEL_RPL) != uCpl)
3526 {
3527 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3528 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3529 }
3530
3531 /*
3532 * Read the descriptor.
3533 */
3534 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537
3538 /*
3539 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3540 */
3541 if (!pDesc->Legacy.Gen.u1DescType)
3542 {
3543 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3544 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3545 }
3546
3547 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3548 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3549 {
3550 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3551 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3552 }
3553 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3554 {
3555 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3556 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3557 }
3558
3559 /* Is it there? */
3560 /** @todo testcase: Is this checked before the canonical / limit check below? */
3561 if (!pDesc->Legacy.Gen.u1Present)
3562 {
3563 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3564 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3565 }
3566
3567 return VINF_SUCCESS;
3568}
3569
3570
3571/**
3572 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3573 * not.
3574 *
3575 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3576 */
3577#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3578# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3579#else
3580# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3581#endif
3582
3583/**
3584 * Updates the EFLAGS in the correct manner wrt. PATM.
3585 *
3586 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3587 * @param a_fEfl The new EFLAGS.
3588 */
3589#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3590# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3591#else
3592# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3593#endif
3594
3595
3596/** @} */
3597
3598/** @name Raising Exceptions.
3599 *
3600 * @{
3601 */
3602
3603
3604/**
3605 * Loads the specified stack far pointer from the TSS.
3606 *
3607 * @returns VBox strict status code.
3608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3609 * @param uCpl The CPL to load the stack for.
3610 * @param pSelSS Where to return the new stack segment.
3611 * @param puEsp Where to return the new stack pointer.
3612 */
3613IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3614{
3615 VBOXSTRICTRC rcStrict;
3616 Assert(uCpl < 4);
3617
3618 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3619 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3620 {
3621 /*
3622 * 16-bit TSS (X86TSS16).
3623 */
3624 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3625 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3626 {
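            /* The X86TSS16 layout keeps the privileged stacks as {sp, ss} word pairs
               starting at offset 2, so ring uCpl's pair sits at byte offset uCpl * 4 + 2
               and a single 32-bit read yields SP in the low word and SS in the high word. */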
3627 uint32_t off = uCpl * 4 + 2;
3628 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3629 {
3630 /** @todo check actual access pattern here. */
3631 uint32_t u32Tmp = 0; /* gcc maybe... */
3632 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3633 if (rcStrict == VINF_SUCCESS)
3634 {
3635 *puEsp = RT_LOWORD(u32Tmp);
3636 *pSelSS = RT_HIWORD(u32Tmp);
3637 return VINF_SUCCESS;
3638 }
3639 }
3640 else
3641 {
3642 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3643 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3644 }
3645 break;
3646 }
3647
3648 /*
3649 * 32-bit TSS (X86TSS32).
3650 */
3651 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3652 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3653 {
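            /* The X86TSS32 layout keeps the privileged stacks as 8-byte {esp, ss} entries
               starting at offset 4, so ring uCpl's entry sits at uCpl * 8 + 4; the 64-bit
               read below yields ESP in the low dword and SS in bits 32..47. */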
3654 uint32_t off = uCpl * 8 + 4;
3655 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3656 {
3657 /** @todo check actual access pattern here. */
3658 uint64_t u64Tmp;
3659 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3660 if (rcStrict == VINF_SUCCESS)
3661 {
3662 *puEsp = u64Tmp & UINT32_MAX;
3663 *pSelSS = (RTSEL)(u64Tmp >> 32);
3664 return VINF_SUCCESS;
3665 }
3666 }
3667 else
3668 {
3669 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3670 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3671 }
3672 break;
3673 }
3674
3675 default:
3676 AssertFailed();
3677 rcStrict = VERR_IEM_IPE_4;
3678 break;
3679 }
3680
3681 *puEsp = 0; /* make gcc happy */
3682 *pSelSS = 0; /* make gcc happy */
3683 return rcStrict;
3684}
3685
3686
3687/**
3688 * Loads the specified stack pointer from the 64-bit TSS.
3689 *
3690 * @returns VBox strict status code.
3691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3692 * @param uCpl The CPL to load the stack for.
3693 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3694 * @param puRsp Where to return the new stack pointer.
3695 */
3696IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3697{
3698 Assert(uCpl < 4);
3699 Assert(uIst < 8);
3700 *puRsp = 0; /* make gcc happy */
3701
3702 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3703 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3704
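    /* X86TSS64 stores rsp0..rsp2 and ist1..ist7 as consecutive 64-bit fields, so the
       wanted slot is simply a qword index added to the offset of the first member. */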
3705 uint32_t off;
3706 if (uIst)
3707 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3708 else
3709 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3710 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3711 {
3712 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3713 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3714 }
3715
3716 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3717}
3718
3719
3720/**
3721 * Adjust the CPU state according to the exception being raised.
3722 *
3723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3724 * @param u8Vector The exception that has been raised.
3725 */
3726DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3727{
3728 switch (u8Vector)
3729 {
3730 case X86_XCPT_DB:
3731 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3732 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3733 break;
3734 /** @todo Read the AMD and Intel exception reference... */
3735 }
3736}
3737
3738
3739/**
3740 * Implements exceptions and interrupts for real mode.
3741 *
3742 * @returns VBox strict status code.
3743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3744 * @param cbInstr The number of bytes to offset rIP by in the return
3745 * address.
3746 * @param u8Vector The interrupt / exception vector number.
3747 * @param fFlags The flags.
3748 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3749 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3750 */
3751IEM_STATIC VBOXSTRICTRC
3752iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3753 uint8_t cbInstr,
3754 uint8_t u8Vector,
3755 uint32_t fFlags,
3756 uint16_t uErr,
3757 uint64_t uCr2)
3758{
3759 NOREF(uErr); NOREF(uCr2);
3760 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3761
3762 /*
3763 * Read the IDT entry.
3764 */
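    /* Each real-mode IVT entry is 4 bytes, a 16-bit offset followed by a 16-bit segment,
       hence the 4 * u8Vector scaling and the RTFAR16 read below. */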
3765 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3766 {
3767 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3768 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3769 }
3770 RTFAR16 Idte;
3771 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3772 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3773 {
3774 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3775 return rcStrict;
3776 }
3777
3778 /*
3779 * Push the stack frame.
3780 */
3781 uint16_t *pu16Frame;
3782 uint64_t uNewRsp;
3783 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3784 if (rcStrict != VINF_SUCCESS)
3785 return rcStrict;
3786
3787 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3788#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3789 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3790 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3791 fEfl |= UINT16_C(0xf000);
3792#endif
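    /* The mapped frame is the new top of stack, so index 0 holds the value pushed last:
       the hardware push order FLAGS, CS, IP lands as pu16Frame[2]=FLAGS, [1]=CS, [0]=IP. */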
3793 pu16Frame[2] = (uint16_t)fEfl;
3794 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3795 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3796 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3797 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3798 return rcStrict;
3799
3800 /*
3801 * Load the vector address into cs:ip and make exception specific state
3802 * adjustments.
3803 */
3804 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3805 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3806 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3807 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3808 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3809 pVCpu->cpum.GstCtx.rip = Idte.off;
3810 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3811 IEMMISC_SET_EFL(pVCpu, fEfl);
3812
3813 /** @todo do we actually do this in real mode? */
3814 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3815 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3816
3817 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3818}
3819
3820
3821/**
3822 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3823 *
3824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3825 * @param pSReg Pointer to the segment register.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3828{
3829 pSReg->Sel = 0;
3830 pSReg->ValidSel = 0;
3831 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3832 {
3833 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3834 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3835 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3836 }
3837 else
3838 {
3839 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3840 /** @todo check this on AMD-V */
3841 pSReg->u64Base = 0;
3842 pSReg->u32Limit = 0;
3843 }
3844}
3845
3846
3847/**
3848 * Loads a segment selector during a task switch in V8086 mode.
3849 *
3850 * @param pSReg Pointer to the segment register.
3851 * @param uSel The selector value to load.
3852 */
3853IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3854{
3855 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
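    /* Real-mode style segment: base = selector * 16, a 64 KiB limit, and attribute
       byte 0xf3 = present, DPL 3, accessed read/write data segment. */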
3856 pSReg->Sel = uSel;
3857 pSReg->ValidSel = uSel;
3858 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3859 pSReg->u64Base = uSel << 4;
3860 pSReg->u32Limit = 0xffff;
3861 pSReg->Attr.u = 0xf3;
3862}
3863
3864
3865/**
3866 * Loads a NULL data selector into a selector register, both the hidden and
3867 * visible parts, in protected mode.
3868 *
3869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3870 * @param pSReg Pointer to the segment register.
3871 * @param uRpl The RPL.
3872 */
3873IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3874{
3875 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3876 * data selector in protected mode. */
3877 pSReg->Sel = uRpl;
3878 pSReg->ValidSel = uRpl;
3879 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3880 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3881 {
3882 /* VT-x (Intel 3960x) observed doing something like this. */
3883 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3884 pSReg->u32Limit = UINT32_MAX;
3885 pSReg->u64Base = 0;
3886 }
3887 else
3888 {
3889 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3890 pSReg->u32Limit = 0;
3891 pSReg->u64Base = 0;
3892 }
3893}
3894
3895
3896/**
3897 * Loads a segment selector during a task switch in protected mode.
3898 *
3899 * In this task switch scenario, we would throw \#TS exceptions rather than
3900 * \#GPs.
3901 *
3902 * @returns VBox strict status code.
3903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3904 * @param pSReg Pointer to the segment register.
3905 * @param uSel The new selector value.
3906 *
3907 * @remarks This does _not_ handle CS or SS.
3908 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3909 */
3910IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3911{
3912 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3913
3914 /* Null data selector. */
3915 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3916 {
3917 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3918 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3919 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3920 return VINF_SUCCESS;
3921 }
3922
3923 /* Fetch the descriptor. */
3924 IEMSELDESC Desc;
3925 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3926 if (rcStrict != VINF_SUCCESS)
3927 {
3928 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3929 VBOXSTRICTRC_VAL(rcStrict)));
3930 return rcStrict;
3931 }
3932
3933 /* Must be a data segment or readable code segment. */
3934 if ( !Desc.Legacy.Gen.u1DescType
3935 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3936 {
3937 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3938 Desc.Legacy.Gen.u4Type));
3939 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3940 }
3941
3942 /* Check privileges for data segments and non-conforming code segments. */
3943 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3944 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3945 {
3946 /* The RPL and the new CPL must be less than or equal to the DPL. */
3947 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3948 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3949 {
3950 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3951 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3952 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3953 }
3954 }
3955
3956 /* Is it there? */
3957 if (!Desc.Legacy.Gen.u1Present)
3958 {
3959 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3960 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3961 }
3962
3963 /* The base and limit. */
3964 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3965 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3966
3967 /*
3968 * Ok, everything checked out fine. Now set the accessed bit before
3969 * committing the result into the registers.
3970 */
3971 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3972 {
3973 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3974 if (rcStrict != VINF_SUCCESS)
3975 return rcStrict;
3976 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3977 }
3978
3979 /* Commit */
3980 pSReg->Sel = uSel;
3981 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3982 pSReg->u32Limit = cbLimit;
3983 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3984 pSReg->ValidSel = uSel;
3985 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3986 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3987 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3988
3989 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3990 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3991 return VINF_SUCCESS;
3992}
3993
3994
3995/**
3996 * Performs a task switch.
3997 *
3998 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3999 * caller is responsible for performing the necessary checks (like DPL, TSS
4000 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4001 * reference for JMP, CALL, IRET.
4002 *
4003 * If the task switch is due to a software interrupt or hardware exception,
4004 * the caller is responsible for validating the TSS selector and descriptor. See
4005 * Intel Instruction reference for INT n.
4006 *
4007 * @returns VBox strict status code.
4008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4009 * @param enmTaskSwitch The cause of the task switch.
4010 * @param uNextEip The EIP effective after the task switch.
4011 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4012 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4013 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4014 * @param SelTSS The TSS selector of the new task.
4015 * @param pNewDescTSS Pointer to the new TSS descriptor.
4016 */
4017IEM_STATIC VBOXSTRICTRC
4018iemTaskSwitch(PVMCPU pVCpu,
4019 IEMTASKSWITCH enmTaskSwitch,
4020 uint32_t uNextEip,
4021 uint32_t fFlags,
4022 uint16_t uErr,
4023 uint64_t uCr2,
4024 RTSEL SelTSS,
4025 PIEMSELDESC pNewDescTSS)
4026{
4027 Assert(!IEM_IS_REAL_MODE(pVCpu));
4028 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4029 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4030
4031 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4032 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4033 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4035 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4036
4037 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4038 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4039
4040 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4041 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4042
4043 /* Update CR2 in case it's a page-fault. */
4044 /** @todo This should probably be done much earlier in IEM/PGM. See
4045 * @bugref{5653#c49}. */
4046 if (fFlags & IEM_XCPT_FLAGS_CR2)
4047 pVCpu->cpum.GstCtx.cr2 = uCr2;
4048
4049 /*
4050 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4051 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4052 */
4053 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4054 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4055 if (uNewTSSLimit < uNewTSSLimitMin)
4056 {
4057 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4058 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4059 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4060 }
4061
4062 /*
4063 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4064 * The new TSS must have been read and validated (DPL, limits etc.) before a
4065 * task-switch VM-exit commences.
4066 *
4067 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4068 */
4069 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4070 {
4071 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4072 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4073 }
4074
4075 /*
4076 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4077 * after validating the incoming (new) TSS; see AMD spec. 15.14.1 "Task Switch Intercept".
4078 */
4079 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4080 {
4081 uint32_t const uExitInfo1 = SelTSS;
4082 uint32_t uExitInfo2 = uErr;
4083 switch (enmTaskSwitch)
4084 {
4085 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4086 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4087 default: break;
4088 }
4089 if (fFlags & IEM_XCPT_FLAGS_ERR)
4090 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4091 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4092 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4093
4094 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4095 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4096 RT_NOREF2(uExitInfo1, uExitInfo2);
4097 }
4098
4099 /*
4100 * Check the current TSS limit. The last write to the current TSS during the
4101 * task switch covers 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4102 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4103 *
4104 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4105 * end up with smaller than "legal" TSS limits.
4106 */
4107 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4108 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4109 if (uCurTSSLimit < uCurTSSLimitMin)
4110 {
4111 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4112 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4113 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4114 }
4115
4116 /*
4117 * Verify that the new TSS can be accessed and map it. Map only the required contents
4118 * and not the entire TSS.
4119 */
4120 void *pvNewTSS;
4121 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4122 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4123 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4124 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4125 * not perform correct translation if this happens. See Intel spec. 7.2.1
4126 * "Task-State Segment" */
4127 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4128 if (rcStrict != VINF_SUCCESS)
4129 {
4130 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4131 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4132 return rcStrict;
4133 }
4134
4135 /*
4136 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4137 */
4138 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4139 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4140 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4141 {
4142 PX86DESC pDescCurTSS;
4143 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4144 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4145 if (rcStrict != VINF_SUCCESS)
4146 {
4147 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4148 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4149 return rcStrict;
4150 }
4151
4152 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4153 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4154 if (rcStrict != VINF_SUCCESS)
4155 {
4156 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4157 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4158 return rcStrict;
4159 }
4160
4161 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4162 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4163 {
4164 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4165 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4166 u32EFlags &= ~X86_EFL_NT;
4167 }
4168 }
4169
4170 /*
4171 * Save the CPU state into the current TSS.
4172 */
4173 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4174 if (GCPtrNewTSS == GCPtrCurTSS)
4175 {
4176 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4177 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4178 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4179 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4180 pVCpu->cpum.GstCtx.ldtr.Sel));
4181 }
4182 if (fIsNewTSS386)
4183 {
4184 /*
4185 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4186 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4187 */
4188 void *pvCurTSS32;
4189 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4190 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4191 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4192 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4193 if (rcStrict != VINF_SUCCESS)
4194 {
4195 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4196 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4197 return rcStrict;
4198 }
4199
4200 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4201 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4202 pCurTSS32->eip = uNextEip;
4203 pCurTSS32->eflags = u32EFlags;
4204 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4205 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4206 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4207 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4208 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4209 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4210 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4211 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4212 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4213 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4214 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4215 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4216 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4217 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4218
4219 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4220 if (rcStrict != VINF_SUCCESS)
4221 {
4222 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4223 VBOXSTRICTRC_VAL(rcStrict)));
4224 return rcStrict;
4225 }
4226 }
4227 else
4228 {
4229 /*
4230 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4231 */
4232 void *pvCurTSS16;
4233 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4234 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4235 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4236 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4240 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243
4244 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4245 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4246 pCurTSS16->ip = uNextEip;
4247 pCurTSS16->flags = u32EFlags;
4248 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4249 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4250 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4251 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4252 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4253 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4254 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4255 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4256 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4257 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4258 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4259 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4260
4261 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4262 if (rcStrict != VINF_SUCCESS)
4263 {
4264 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4265 VBOXSTRICTRC_VAL(rcStrict)));
4266 return rcStrict;
4267 }
4268 }
4269
4270 /*
4271 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4272 */
4273 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4274 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4275 {
4276 /* Whether it's a 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4277 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4278 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4279 }
4280
4281 /*
4282 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4283 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4284 */
4285 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4286 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4287 bool fNewDebugTrap;
4288 if (fIsNewTSS386)
4289 {
4290 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4291 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4292 uNewEip = pNewTSS32->eip;
4293 uNewEflags = pNewTSS32->eflags;
4294 uNewEax = pNewTSS32->eax;
4295 uNewEcx = pNewTSS32->ecx;
4296 uNewEdx = pNewTSS32->edx;
4297 uNewEbx = pNewTSS32->ebx;
4298 uNewEsp = pNewTSS32->esp;
4299 uNewEbp = pNewTSS32->ebp;
4300 uNewEsi = pNewTSS32->esi;
4301 uNewEdi = pNewTSS32->edi;
4302 uNewES = pNewTSS32->es;
4303 uNewCS = pNewTSS32->cs;
4304 uNewSS = pNewTSS32->ss;
4305 uNewDS = pNewTSS32->ds;
4306 uNewFS = pNewTSS32->fs;
4307 uNewGS = pNewTSS32->gs;
4308 uNewLdt = pNewTSS32->selLdt;
4309 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4310 }
4311 else
4312 {
4313 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4314 uNewCr3 = 0;
4315 uNewEip = pNewTSS16->ip;
4316 uNewEflags = pNewTSS16->flags;
4317 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4318 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4319 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4320 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4321 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4322 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4323 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4324 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4325 uNewES = pNewTSS16->es;
4326 uNewCS = pNewTSS16->cs;
4327 uNewSS = pNewTSS16->ss;
4328 uNewDS = pNewTSS16->ds;
4329 uNewFS = 0;
4330 uNewGS = 0;
4331 uNewLdt = pNewTSS16->selLdt;
4332 fNewDebugTrap = false;
4333 }
4334
4335 if (GCPtrNewTSS == GCPtrCurTSS)
4336 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4337 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4338
4339 /*
4340 * We're done accessing the new TSS.
4341 */
4342 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4343 if (rcStrict != VINF_SUCCESS)
4344 {
4345 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4346 return rcStrict;
4347 }
4348
4349 /*
4350 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4351 */
4352 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4353 {
4354 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4355 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4356 if (rcStrict != VINF_SUCCESS)
4357 {
4358 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4359 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4360 return rcStrict;
4361 }
4362
4363 /* Check that the descriptor indicates the new TSS is available (not busy). */
4364 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4365 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4366 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4367
4368 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4369 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4370 if (rcStrict != VINF_SUCCESS)
4371 {
4372 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4373 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4374 return rcStrict;
4375 }
4376 }
4377
4378 /*
4379 * From this point on, we're technically in the new task. We will defer exceptions
4380 * until the completion of the task switch but before executing any instructions in the new task.
4381 */
4382 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4383 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4384 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4385 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4386 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4387 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4388 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4389
4390 /* Set the busy bit in TR. */
4391 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4392 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4393 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4394 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4395 {
4396 uNewEflags |= X86_EFL_NT;
4397 }
4398
4399 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4400 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4401 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4402
4403 pVCpu->cpum.GstCtx.eip = uNewEip;
4404 pVCpu->cpum.GstCtx.eax = uNewEax;
4405 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4406 pVCpu->cpum.GstCtx.edx = uNewEdx;
4407 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4408 pVCpu->cpum.GstCtx.esp = uNewEsp;
4409 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4410 pVCpu->cpum.GstCtx.esi = uNewEsi;
4411 pVCpu->cpum.GstCtx.edi = uNewEdi;
4412
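    /* Sanitize the incoming EFLAGS image: keep only the implemented (live) flag bits
       and force the reserved bit 1 that always reads as one. */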
4413 uNewEflags &= X86_EFL_LIVE_MASK;
4414 uNewEflags |= X86_EFL_RA1_MASK;
4415 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4416
4417 /*
4418 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4419 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4420 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4421 */
4422 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4423 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4424
4425 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4426 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4427
4428 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4429 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4430
4431 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4432 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4433
4434 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4435 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4436
4437 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4438 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4439 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4440
4441 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4442 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4443 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4444 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4445
4446 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4447 {
4448 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4449 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4454 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4455 }
4456
4457 /*
4458 * Switch CR3 for the new task.
4459 */
4460 if ( fIsNewTSS386
4461 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4462 {
4463 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4464 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4465 AssertRCSuccessReturn(rc, rc);
4466
4467 /* Inform PGM. */
4468 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4469 AssertRCReturn(rc, rc);
4470 /* ignore informational status codes */
4471
4472 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4473 }
4474
4475 /*
4476 * Switch LDTR for the new task.
4477 */
4478 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4479 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4480 else
4481 {
4482 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4483
4484 IEMSELDESC DescNewLdt;
4485 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4486 if (rcStrict != VINF_SUCCESS)
4487 {
4488 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4489 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4490 return rcStrict;
4491 }
4492 if ( !DescNewLdt.Legacy.Gen.u1Present
4493 || DescNewLdt.Legacy.Gen.u1DescType
4494 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4495 {
4496 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4497 uNewLdt, DescNewLdt.Legacy.u));
4498 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4499 }
4500
4501 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4502 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4503 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4504 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4505 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4506 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4507 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4508 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4509 }
4510
4511 IEMSELDESC DescSS;
4512 if (IEM_IS_V86_MODE(pVCpu))
4513 {
4514 pVCpu->iem.s.uCpl = 3;
4515 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4520 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4521
4522 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4523 DescSS.Legacy.u = 0;
4524 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4525 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4526 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4527 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4528 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4529 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4530 DescSS.Legacy.Gen.u2Dpl = 3;
4531 }
4532 else
4533 {
4534 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4535
4536 /*
4537 * Load the stack segment for the new task.
4538 */
4539 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4540 {
4541 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 /* Fetch the descriptor. */
4546 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4547 if (rcStrict != VINF_SUCCESS)
4548 {
4549 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4550 VBOXSTRICTRC_VAL(rcStrict)));
4551 return rcStrict;
4552 }
4553
4554 /* SS must be a data segment and writable. */
4555 if ( !DescSS.Legacy.Gen.u1DescType
4556 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4557 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4558 {
4559 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4560 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4561 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4562 }
4563
4564 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4565 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4566 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4567 {
4568 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4569 uNewCpl));
4570 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4571 }
4572
4573 /* Is it there? */
4574 if (!DescSS.Legacy.Gen.u1Present)
4575 {
4576 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4577 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4578 }
4579
4580 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4581 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4582
4583 /* Set the accessed bit before committing the result into SS. */
4584 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4585 {
4586 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4587 if (rcStrict != VINF_SUCCESS)
4588 return rcStrict;
4589 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4590 }
4591
4592 /* Commit SS. */
4593 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4594 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4595 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4596 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4597 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4598 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4599 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4600
4601 /* CPL has changed, update IEM before loading rest of segments. */
4602 pVCpu->iem.s.uCpl = uNewCpl;
4603
4604 /*
4605 * Load the data segments for the new task.
4606 */
4607 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4608 if (rcStrict != VINF_SUCCESS)
4609 return rcStrict;
4610 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4611 if (rcStrict != VINF_SUCCESS)
4612 return rcStrict;
4613 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4614 if (rcStrict != VINF_SUCCESS)
4615 return rcStrict;
4616 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4617 if (rcStrict != VINF_SUCCESS)
4618 return rcStrict;
4619
4620 /*
4621 * Load the code segment for the new task.
4622 */
4623 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4624 {
4625 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4626 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4627 }
4628
4629 /* Fetch the descriptor. */
4630 IEMSELDESC DescCS;
4631 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4632 if (rcStrict != VINF_SUCCESS)
4633 {
4634 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4635 return rcStrict;
4636 }
4637
4638 /* CS must be a code segment. */
4639 if ( !DescCS.Legacy.Gen.u1DescType
4640 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4641 {
4642 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4643 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4644 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4645 }
4646
4647 /* For conforming CS, DPL must be less than or equal to the RPL. */
4648 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4649 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4650 {
4651 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4652 DescCS.Legacy.Gen.u2Dpl));
4653 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4654 }
4655
4656 /* For non-conforming CS, DPL must match RPL. */
4657 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4658 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4659 {
4660 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4661 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4662 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4663 }
4664
4665 /* Is it there? */
4666 if (!DescCS.Legacy.Gen.u1Present)
4667 {
4668 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4669 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4670 }
4671
4672 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4673 u64Base = X86DESC_BASE(&DescCS.Legacy);
4674
4675 /* Set the accessed bit before committing the result into CS. */
4676 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4677 {
4678 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4679 if (rcStrict != VINF_SUCCESS)
4680 return rcStrict;
4681 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4682 }
4683
4684 /* Commit CS. */
4685 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4686 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4687 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4688 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4689 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4690 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4691 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4692 }
4693
4694 /** @todo Debug trap. */
4695 if (fIsNewTSS386 && fNewDebugTrap)
4696 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4697
4698 /*
4699 * Construct the error code masks based on what caused this task switch.
4700 * See Intel Instruction reference for INT.
4701 */
4702 uint16_t uExt;
4703 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4704 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4705 {
4706 uExt = 1;
4707 }
4708 else
4709 uExt = 0;
4710
4711 /*
4712 * Push any error code on to the new stack.
4713 */
4714 if (fFlags & IEM_XCPT_FLAGS_ERR)
4715 {
4716 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4717 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
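/* The error code is pushed as a dword on the stack of a 32-bit TSS and as a word for a 16-bit TSS (see the pushes below). */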
4718 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4719
4720 /* Check that there is sufficient space on the stack. */
4721 /** @todo Factor out segment limit checking for normal/expand down segments
4722 * into a separate function. */
4723 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4724 {
4725 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4726 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4727 {
4728 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4729 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4730 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4731 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4732 }
4733 }
4734 else
4735 {
4736 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4737 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4738 {
4739 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4740 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4741 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4742 }
4743 }
4744
4745
4746 if (fIsNewTSS386)
4747 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4748 else
4749 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4750 if (rcStrict != VINF_SUCCESS)
4751 {
4752 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4753 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4754 return rcStrict;
4755 }
4756 }
4757
4758 /* Check the new EIP against the new CS limit. */
4759 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4760 {
4761 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4762 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4763 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4764 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4765 }
4766
4767 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4768 pVCpu->cpum.GstCtx.ss.Sel));
4769 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4770}
4771
4772
4773/**
4774 * Implements exceptions and interrupts for protected mode.
4775 *
4776 * @returns VBox strict status code.
4777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4778 * @param cbInstr The number of bytes to offset rIP by in the return
4779 * address.
4780 * @param u8Vector The interrupt / exception vector number.
4781 * @param fFlags The flags.
4782 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4783 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4784 */
4785IEM_STATIC VBOXSTRICTRC
4786iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4787 uint8_t cbInstr,
4788 uint8_t u8Vector,
4789 uint32_t fFlags,
4790 uint16_t uErr,
4791 uint64_t uCr2)
4792{
4793 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4794
4795 /*
4796 * Read the IDT entry.
4797 */
4798 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4799 {
4800 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4801 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4802 }
4803 X86DESC Idte;
4804 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4805 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4806 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4807 {
4808 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4809 return rcStrict;
4810 }
4811 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4812 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4813 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4814
4815 /*
4816 * Check the descriptor type, DPL and such.
4817 * ASSUMES this is done in the same order as described for call-gate calls.
4818 */
4819 if (Idte.Gate.u1DescType)
4820 {
4821 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4822 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4823 }
4824 bool fTaskGate = false;
4825 uint8_t f32BitGate = true;
4826 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
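/* For interrupt and trap gates, TF, NT, RF and VM get cleared from EFLAGS further down; interrupt gates additionally add IF to this mask. */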
4827 switch (Idte.Gate.u4Type)
4828 {
4829 case X86_SEL_TYPE_SYS_UNDEFINED:
4830 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4831 case X86_SEL_TYPE_SYS_LDT:
4832 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4833 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4834 case X86_SEL_TYPE_SYS_UNDEFINED2:
4835 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4836 case X86_SEL_TYPE_SYS_UNDEFINED3:
4837 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4838 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4839 case X86_SEL_TYPE_SYS_UNDEFINED4:
4840 {
4841 /** @todo check what actually happens when the type is wrong...
4842 * esp. call gates. */
4843 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4844 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4845 }
4846
4847 case X86_SEL_TYPE_SYS_286_INT_GATE:
4848 f32BitGate = false;
4849 RT_FALL_THRU();
4850 case X86_SEL_TYPE_SYS_386_INT_GATE:
4851 fEflToClear |= X86_EFL_IF;
4852 break;
4853
4854 case X86_SEL_TYPE_SYS_TASK_GATE:
4855 fTaskGate = true;
4856#ifndef IEM_IMPLEMENTS_TASKSWITCH
4857 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4858#endif
4859 break;
4860
4861 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4862 f32BitGate = false; RT_FALL_THRU();
4863 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4864 break;
4865
4866 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4867 }
4868
4869 /* Check DPL against CPL if applicable. */
4870 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4871 {
4872 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4875 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4876 }
4877 }
4878
4879 /* Is it there? */
4880 if (!Idte.Gate.u1Present)
4881 {
4882 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4883 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4884 }
4885
4886 /* Is it a task-gate? */
4887 if (fTaskGate)
4888 {
4889 /*
4890 * Construct the error code masks based on what caused this task switch.
4891 * See Intel Instruction reference for INT.
4892 */
4893 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4894 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4895 RTSEL SelTSS = Idte.Gate.u16Sel;
4896
4897 /*
4898 * Fetch the TSS descriptor in the GDT.
4899 */
4900 IEMSELDESC DescTSS;
4901 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4902 if (rcStrict != VINF_SUCCESS)
4903 {
4904 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4905 VBOXSTRICTRC_VAL(rcStrict)));
4906 return rcStrict;
4907 }
4908
4909 /* The TSS descriptor must be a system segment and be available (not busy). */
4910 if ( DescTSS.Legacy.Gen.u1DescType
4911 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4912 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4913 {
4914 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4915 u8Vector, SelTSS, DescTSS.Legacy.au64));
4916 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4917 }
4918
4919 /* The TSS must be present. */
4920 if (!DescTSS.Legacy.Gen.u1Present)
4921 {
4922 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4923 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4924 }
4925
4926 /* Do the actual task switch. */
4927 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4928 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4929 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4930 }
4931
4932 /* A null CS is bad. */
4933 RTSEL NewCS = Idte.Gate.u16Sel;
4934 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4935 {
4936 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4937 return iemRaiseGeneralProtectionFault0(pVCpu);
4938 }
4939
4940 /* Fetch the descriptor for the new CS. */
4941 IEMSELDESC DescCS;
4942 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4943 if (rcStrict != VINF_SUCCESS)
4944 {
4945 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4946 return rcStrict;
4947 }
4948
4949 /* Must be a code segment. */
4950 if (!DescCS.Legacy.Gen.u1DescType)
4951 {
4952 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4953 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4954 }
4955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4956 {
4957 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4958 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4959 }
4960
4961 /* Don't allow lowering the privilege level. */
4962 /** @todo Does the lowering of privileges apply to software interrupts
4963 * only? This has bearings on the more-privileged or
4964 * same-privilege stack behavior further down. A testcase would
4965 * be nice. */
4966 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4967 {
4968 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4969 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4970 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4971 }
4972
4973 /* Make sure the selector is present. */
4974 if (!DescCS.Legacy.Gen.u1Present)
4975 {
4976 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4977 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4978 }
4979
4980 /* Check the new EIP against the new CS limit. */
4981 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4982 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4983 ? Idte.Gate.u16OffsetLow
4984 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4985 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4986 if (uNewEip > cbLimitCS)
4987 {
4988 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4989 u8Vector, uNewEip, cbLimitCS, NewCS));
4990 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4991 }
4992 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4993
4994 /* Calc the flag image to push. */
4995 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4996 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4997 fEfl &= ~X86_EFL_RF;
4998 else
4999 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5000
5001 /* From V8086 mode only go to CPL 0. */
5002 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5003 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
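/* A conforming CS keeps the current CPL; a non-conforming CS switches to the descriptor's DPL. */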
5004 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5005 {
5006 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5007 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5008 }
5009
5010 /*
5011 * If the privilege level changes, we need to get a new stack from the TSS.
5012 * This in turns means validating the new SS and ESP...
5013 */
5014 if (uNewCpl != pVCpu->iem.s.uCpl)
5015 {
5016 RTSEL NewSS;
5017 uint32_t uNewEsp;
5018 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021
5022 IEMSELDESC DescSS;
5023 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5024 if (rcStrict != VINF_SUCCESS)
5025 return rcStrict;
5026 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5027 if (!DescSS.Legacy.Gen.u1DefBig)
5028 {
5029 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5030 uNewEsp = (uint16_t)uNewEsp;
5031 }
5032
5033 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5034
5035 /* Check that there is sufficient space for the stack frame. */
5036 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5037 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5038 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5039 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
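/* That is 5 slots (EIP, CS, EFLAGS, ESP, SS) or 6 with an error code, plus ES, DS, FS and GS when interrupting V8086 code; each slot is 4 bytes for a 32-bit gate and 2 bytes for a 16-bit one. */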
5040
5041 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5042 {
5043 if ( uNewEsp - 1 > cbLimitSS
5044 || uNewEsp < cbStackFrame)
5045 {
5046 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5047 u8Vector, NewSS, uNewEsp, cbStackFrame));
5048 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5049 }
5050 }
5051 else
5052 {
5053 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5054 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5055 {
5056 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5057 u8Vector, NewSS, uNewEsp, cbStackFrame));
5058 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5059 }
5060 }
5061
5062 /*
5063 * Start making changes.
5064 */
5065
5066 /* Set the new CPL so that stack accesses use it. */
5067 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5068 pVCpu->iem.s.uCpl = uNewCpl;
5069
5070 /* Create the stack frame. */
5071 RTPTRUNION uStackFrame;
5072 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5073 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5074 if (rcStrict != VINF_SUCCESS)
5075 return rcStrict;
5076 void * const pvStackFrame = uStackFrame.pv;
5077 if (f32BitGate)
5078 {
5079 if (fFlags & IEM_XCPT_FLAGS_ERR)
5080 *uStackFrame.pu32++ = uErr;
5081 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5082 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5083 uStackFrame.pu32[2] = fEfl;
5084 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5085 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5086 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5087 if (fEfl & X86_EFL_VM)
5088 {
5089 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5090 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5091 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5092 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5093 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5094 }
5095 }
5096 else
5097 {
5098 if (fFlags & IEM_XCPT_FLAGS_ERR)
5099 *uStackFrame.pu16++ = uErr;
5100 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5101 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5102 uStackFrame.pu16[2] = fEfl;
5103 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5104 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5105 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5106 if (fEfl & X86_EFL_VM)
5107 {
5108 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5109 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5110 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5111 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5112 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5113 }
5114 }
5115 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5116 if (rcStrict != VINF_SUCCESS)
5117 return rcStrict;
5118
5119 /* Mark the selectors 'accessed' (hope this is the correct time). */
5120 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5121 * after pushing the stack frame? (Write protect the gdt + stack to
5122 * find out.) */
5123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5124 {
5125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5126 if (rcStrict != VINF_SUCCESS)
5127 return rcStrict;
5128 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5129 }
5130
5131 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5132 {
5133 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5134 if (rcStrict != VINF_SUCCESS)
5135 return rcStrict;
5136 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5137 }
5138
5139 /*
5140 * Start committing the register changes (joins with the DPL=CPL branch).
5141 */
5142 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5143 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5144 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5145 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5146 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5147 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5148 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5149 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5150 * SP is loaded).
5151 * Need to check the other combinations too:
5152 * - 16-bit TSS, 32-bit handler
5153 * - 32-bit TSS, 16-bit handler */
5154 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5155 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5156 else
5157 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5158
5159 if (fEfl & X86_EFL_VM)
5160 {
5161 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5163 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5164 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5165 }
5166 }
5167 /*
5168 * Same privilege, no stack change and smaller stack frame.
5169 */
5170 else
5171 {
5172 uint64_t uNewRsp;
5173 RTPTRUNION uStackFrame;
5174 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
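/* Only EIP, CS and EFLAGS (plus any error code) are pushed when the privilege level does not change. */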
5175 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5176 if (rcStrict != VINF_SUCCESS)
5177 return rcStrict;
5178 void * const pvStackFrame = uStackFrame.pv;
5179
5180 if (f32BitGate)
5181 {
5182 if (fFlags & IEM_XCPT_FLAGS_ERR)
5183 *uStackFrame.pu32++ = uErr;
5184 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5185 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5186 uStackFrame.pu32[2] = fEfl;
5187 }
5188 else
5189 {
5190 if (fFlags & IEM_XCPT_FLAGS_ERR)
5191 *uStackFrame.pu16++ = uErr;
5192 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5193 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5194 uStackFrame.pu16[2] = fEfl;
5195 }
5196 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5197 if (rcStrict != VINF_SUCCESS)
5198 return rcStrict;
5199
5200 /* Mark the CS selector as 'accessed'. */
5201 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5202 {
5203 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5204 if (rcStrict != VINF_SUCCESS)
5205 return rcStrict;
5206 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5207 }
5208
5209 /*
5210 * Start committing the register changes (joins with the other branch).
5211 */
5212 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5213 }
5214
5215 /* ... register committing continues. */
5216 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5217 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5218 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5219 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5220 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5221 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5222
5223 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5224 fEfl &= ~fEflToClear;
5225 IEMMISC_SET_EFL(pVCpu, fEfl);
5226
5227 if (fFlags & IEM_XCPT_FLAGS_CR2)
5228 pVCpu->cpum.GstCtx.cr2 = uCr2;
5229
5230 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5231 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5232
5233 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5234}
5235
5236
5237/**
5238 * Implements exceptions and interrupts for long mode.
5239 *
5240 * @returns VBox strict status code.
5241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5242 * @param cbInstr The number of bytes to offset rIP by in the return
5243 * address.
5244 * @param u8Vector The interrupt / exception vector number.
5245 * @param fFlags The flags.
5246 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5247 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5248 */
5249IEM_STATIC VBOXSTRICTRC
5250iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5251 uint8_t cbInstr,
5252 uint8_t u8Vector,
5253 uint32_t fFlags,
5254 uint16_t uErr,
5255 uint64_t uCr2)
5256{
5257 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5258
5259 /*
5260 * Read the IDT entry.
5261 */
5262 uint16_t offIdt = (uint16_t)u8Vector << 4;
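/* Long mode IDT entries are 16 bytes, hence the shift by 4; both halves are fetched below. */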
5263 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5264 {
5265 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5266 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5267 }
5268 X86DESC64 Idte;
5269 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5270 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5271 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5272 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5273 {
5274 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5275 return rcStrict;
5276 }
5277 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5278 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5279 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5280
5281 /*
5282 * Check the descriptor type, DPL and such.
5283 * ASSUMES this is done in the same order as described for call-gate calls.
5284 */
5285 if (Idte.Gate.u1DescType)
5286 {
5287 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5288 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5289 }
5290 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5291 switch (Idte.Gate.u4Type)
5292 {
5293 case AMD64_SEL_TYPE_SYS_INT_GATE:
5294 fEflToClear |= X86_EFL_IF;
5295 break;
5296 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5297 break;
5298
5299 default:
5300 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5301 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5302 }
5303
5304 /* Check DPL against CPL if applicable. */
5305 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5306 {
5307 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5308 {
5309 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5310 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5311 }
5312 }
5313
5314 /* Is it there? */
5315 if (!Idte.Gate.u1Present)
5316 {
5317 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5318 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5319 }
5320
5321 /* A null CS is bad. */
5322 RTSEL NewCS = Idte.Gate.u16Sel;
5323 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5324 {
5325 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5326 return iemRaiseGeneralProtectionFault0(pVCpu);
5327 }
5328
5329 /* Fetch the descriptor for the new CS. */
5330 IEMSELDESC DescCS;
5331 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5332 if (rcStrict != VINF_SUCCESS)
5333 {
5334 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5335 return rcStrict;
5336 }
5337
5338 /* Must be a 64-bit code segment. */
5339 if (!DescCS.Long.Gen.u1DescType)
5340 {
5341 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5342 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5343 }
5344 if ( !DescCS.Long.Gen.u1Long
5345 || DescCS.Long.Gen.u1DefBig
5346 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5347 {
5348 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5349 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5350 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5351 }
5352
5353 /* Don't allow lowering the privilege level. For non-conforming CS
5354 selectors, the CS.DPL sets the privilege level the trap/interrupt
5355 handler runs at. For conforming CS selectors, the CPL remains
5356 unchanged, but the CS.DPL must be <= CPL. */
5357 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5358 * when CPU in Ring-0. Result \#GP? */
5359 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5360 {
5361 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5362 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5363 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5364 }
5365
5366
5367 /* Make sure the selector is present. */
5368 if (!DescCS.Legacy.Gen.u1Present)
5369 {
5370 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5371 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5372 }
5373
5374 /* Check that the new RIP is canonical. */
5375 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5376 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5377 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5378 if (!IEM_IS_CANONICAL(uNewRip))
5379 {
5380 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5381 return iemRaiseGeneralProtectionFault0(pVCpu);
5382 }
5383
5384 /*
5385 * If the privilege level changes or if the IST isn't zero, we need to get
5386 * a new stack from the TSS.
5387 */
5388 uint64_t uNewRsp;
5389 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5390 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5391 if ( uNewCpl != pVCpu->iem.s.uCpl
5392 || Idte.Gate.u3IST != 0)
5393 {
5394 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5395 if (rcStrict != VINF_SUCCESS)
5396 return rcStrict;
5397 }
5398 else
5399 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5400 uNewRsp &= ~(uint64_t)0xf;
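/* In 64-bit mode the CPU aligns the stack pointer to a 16 byte boundary before pushing the interrupt frame. */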
5401
5402 /*
5403 * Calc the flag image to push.
5404 */
5405 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5406 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5407 fEfl &= ~X86_EFL_RF;
5408 else
5409 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5410
5411 /*
5412 * Start making changes.
5413 */
5414 /* Set the new CPL so that stack accesses use it. */
5415 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5416 pVCpu->iem.s.uCpl = uNewCpl;
5417
5418 /* Create the stack frame. */
5419 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
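/* The long mode frame is SS, RSP, RFLAGS, CS and RIP, plus the error code when present, all pushed as qwords. */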
5420 RTPTRUNION uStackFrame;
5421 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5422 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5423 if (rcStrict != VINF_SUCCESS)
5424 return rcStrict;
5425 void * const pvStackFrame = uStackFrame.pv;
5426
5427 if (fFlags & IEM_XCPT_FLAGS_ERR)
5428 *uStackFrame.pu64++ = uErr;
5429 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5430 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5431 uStackFrame.pu64[2] = fEfl;
5432 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5433 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5434 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5435 if (rcStrict != VINF_SUCCESS)
5436 return rcStrict;
5437
5438 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5439 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5440 * after pushing the stack frame? (Write protect the gdt + stack to
5441 * find out.) */
5442 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5443 {
5444 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5445 if (rcStrict != VINF_SUCCESS)
5446 return rcStrict;
5447 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5448 }
5449
5450 /*
5451 * Start committing the register changes.
5452 */
5453 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5454 * hidden registers when interrupting 32-bit or 16-bit code! */
5455 if (uNewCpl != uOldCpl)
5456 {
5457 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5458 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5459 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5460 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5461 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5462 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5463 }
5464 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5465 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5466 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5467 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5468 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5469 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5470 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5471 pVCpu->cpum.GstCtx.rip = uNewRip;
5472
5473 fEfl &= ~fEflToClear;
5474 IEMMISC_SET_EFL(pVCpu, fEfl);
5475
5476 if (fFlags & IEM_XCPT_FLAGS_CR2)
5477 pVCpu->cpum.GstCtx.cr2 = uCr2;
5478
5479 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5480 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5481
5482 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5483}
5484
5485
5486/**
5487 * Implements exceptions and interrupts.
5488 *
5489 * All exceptions and interrupts go through this function!
5490 *
5491 * @returns VBox strict status code.
5492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5493 * @param cbInstr The number of bytes to offset rIP by in the return
5494 * address.
5495 * @param u8Vector The interrupt / exception vector number.
5496 * @param fFlags The flags.
5497 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5498 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5499 */
5500DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5501iemRaiseXcptOrInt(PVMCPU pVCpu,
5502 uint8_t cbInstr,
5503 uint8_t u8Vector,
5504 uint32_t fFlags,
5505 uint16_t uErr,
5506 uint64_t uCr2)
5507{
5508 /*
5509 * Get all the state that we might need here.
5510 */
5511 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5512 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5513
5514#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5515 /*
5516 * Flush prefetch buffer
5517 */
5518 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5519#endif
5520
5521 /*
5522 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5523 */
5524 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5525 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5526 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5527 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5528 {
5529 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5530 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5531 u8Vector = X86_XCPT_GP;
5532 uErr = 0;
5533 }
5534#ifdef DBGFTRACE_ENABLED
5535 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5536 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5537 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5538#endif
5539
5540 /*
5541 * Evaluate whether NMI blocking should be in effect.
5542 * Normally, NMI blocking is in effect whenever we inject an NMI.
5543 */
5544 bool fBlockNmi;
5545 if ( u8Vector == X86_XCPT_NMI
5546 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5547 fBlockNmi = true;
5548 else
5549 fBlockNmi = false;
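/* Note: architecturally, NMI blocking established here remains in effect until the next IRET. */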
5550
5551#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5552 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5553 {
5554 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5555 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5556 return rcStrict0;
5557
5558 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5559 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5560 {
5561 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5562 fBlockNmi = false;
5563 }
5564 }
5565#endif
5566
5567#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5568 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5569 {
5570 /*
5571 * If the event is being injected as part of VMRUN, it isn't subject to event
5572 * intercepts in the nested-guest. However, secondary exceptions that occur
5573 * during injection of any event -are- subject to exception intercepts.
5574 *
5575 * See AMD spec. 15.20 "Event Injection".
5576 */
5577 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5578 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5579 else
5580 {
5581 /*
5582 * Check and handle if the event being raised is intercepted.
5583 */
5584 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5585 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5586 return rcStrict0;
5587 }
5588 }
5589#endif
5590
5591 /*
5592 * Set NMI blocking if necessary.
5593 */
5594 if ( fBlockNmi
5595 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5596 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5597
5598 /*
5599 * Do recursion accounting.
5600 */
5601 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5602 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5603 if (pVCpu->iem.s.cXcptRecursions == 0)
5604 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5605 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5606 else
5607 {
5608 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5609 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5610 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5611
5612 if (pVCpu->iem.s.cXcptRecursions >= 4)
5613 {
5614#ifdef DEBUG_bird
5615 AssertFailed();
5616#endif
5617 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5618 }
5619
5620 /*
5621 * Evaluate the sequence of recurring events.
5622 */
5623 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5624 NULL /* pXcptRaiseInfo */);
5625 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5626 { /* likely */ }
5627 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5628 {
5629 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5630 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5631 u8Vector = X86_XCPT_DF;
5632 uErr = 0;
5633#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5634 /* VMX nested-guest #DF intercept needs to be checked here. */
5635 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5636 {
5637 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5638 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5639 return rcStrict0;
5640 }
5641#endif
5642 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5643 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5644 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5645 }
5646 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5647 {
5648 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5649 return iemInitiateCpuShutdown(pVCpu);
5650 }
5651 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5652 {
5653 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5654 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5655 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5656 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5657 return VERR_EM_GUEST_CPU_HANG;
5658 }
5659 else
5660 {
5661 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5662 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5663 return VERR_IEM_IPE_9;
5664 }
5665
5666 /*
5667 * The 'EXT' bit is set when an exception occurs during delivery of an external
5668 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5669 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5670 * interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
5671 *
5672 * [1] - Intel spec. 6.13 "Error Code"
5673 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5674 * [3] - Intel Instruction reference for INT n.
5675 */
5676 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5677 && (fFlags & IEM_XCPT_FLAGS_ERR)
5678 && u8Vector != X86_XCPT_PF
5679 && u8Vector != X86_XCPT_DF)
5680 {
5681 uErr |= X86_TRAP_ERR_EXTERNAL;
5682 }
5683 }
5684
5685 pVCpu->iem.s.cXcptRecursions++;
5686 pVCpu->iem.s.uCurXcpt = u8Vector;
5687 pVCpu->iem.s.fCurXcpt = fFlags;
5688 pVCpu->iem.s.uCurXcptErr = uErr;
5689 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5690
5691 /*
5692 * Extensive logging.
5693 */
5694#if defined(LOG_ENABLED) && defined(IN_RING3)
5695 if (LogIs3Enabled())
5696 {
5697 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5698 PVM pVM = pVCpu->CTX_SUFF(pVM);
5699 char szRegs[4096];
5700 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5701 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5702 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5703 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5704 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5705 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5706 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5707 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5708 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5709 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5710 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5711 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5712 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5713 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5714 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5715 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5716 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5717 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5718 " efer=%016VR{efer}\n"
5719 " pat=%016VR{pat}\n"
5720 " sf_mask=%016VR{sf_mask}\n"
5721 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5722 " lstar=%016VR{lstar}\n"
5723 " star=%016VR{star} cstar=%016VR{cstar}\n"
5724 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5725 );
5726
5727 char szInstr[256];
5728 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5729 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5730 szInstr, sizeof(szInstr), NULL);
5731 Log3(("%s%s\n", szRegs, szInstr));
5732 }
5733#endif /* LOG_ENABLED */
5734
5735 /*
5736 * Call the mode specific worker function.
5737 */
5738 VBOXSTRICTRC rcStrict;
5739 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5740 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5741 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5742 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5743 else
5744 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5745
5746 /* Flush the prefetch buffer. */
5747#ifdef IEM_WITH_CODE_TLB
5748 pVCpu->iem.s.pbInstrBuf = NULL;
5749#else
5750 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5751#endif
5752
5753 /*
5754 * Unwind.
5755 */
5756 pVCpu->iem.s.cXcptRecursions--;
5757 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5758 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5759 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5760 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5761 pVCpu->iem.s.cXcptRecursions + 1));
5762 return rcStrict;
5763}
5764
5765#ifdef IEM_WITH_SETJMP
5766/**
5767 * See iemRaiseXcptOrInt. Will not return.
5768 */
5769IEM_STATIC DECL_NO_RETURN(void)
5770iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5771 uint8_t cbInstr,
5772 uint8_t u8Vector,
5773 uint32_t fFlags,
5774 uint16_t uErr,
5775 uint64_t uCr2)
5776{
5777 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5778 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5779}
5780#endif
5781
5782
5783/** \#DE - 00. */
5784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5785{
5786 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5787}
5788
5789
5790/** \#DB - 01.
5791 * @note This automatically clears DR7.GD. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5793{
5794 /** @todo set/clear RF. */
5795 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5796 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5797}
5798
5799
5800/** \#BR - 05. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5802{
5803 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5804}
5805
5806
5807/** \#UD - 06. */
5808DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5809{
5810 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5811}
5812
5813
5814/** \#NM - 07. */
5815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5816{
5817 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5818}
5819
5820
5821/** \#TS(err) - 0a. */
5822DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5823{
5824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5825}
5826
5827
5828/** \#TS(tr) - 0a. */
5829DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5830{
5831 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5832 pVCpu->cpum.GstCtx.tr.Sel, 0);
5833}
5834
5835
5836/** \#TS(0) - 0a. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5838{
5839 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5840 0, 0);
5841}
5842
5843
5844 /** \#TS(sel) - 0a. */
5845DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5846{
5847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5848 uSel & X86_SEL_MASK_OFF_RPL, 0);
5849}
5850
5851
5852/** \#NP(err) - 0b. */
5853DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5854{
5855 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5856}
5857
5858
5859/** \#NP(sel) - 0b. */
5860DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5861{
5862 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5863 uSel & ~X86_SEL_RPL, 0);
5864}
5865
5866
5867/** \#SS(seg) - 0c. */
5868DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5869{
5870 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5871 uSel & ~X86_SEL_RPL, 0);
5872}
5873
5874
5875/** \#SS(err) - 0c. */
5876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5877{
5878 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5879}
5880
5881
5882/** \#GP(n) - 0d. */
5883DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5884{
5885 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5886}
5887
5888
5889/** \#GP(0) - 0d. */
5890DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5891{
5892 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5893}
5894
5895#ifdef IEM_WITH_SETJMP
5896/** \#GP(0) - 0d. */
5897DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5898{
5899 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5900}
5901#endif
5902
5903
5904/** \#GP(sel) - 0d. */
5905DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5906{
5907 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5908 Sel & ~X86_SEL_RPL, 0);
5909}
5910
5911
5912/** \#GP(0) - 0d. */
5913DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5914{
5915 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5916}
5917
5918
5919/** \#GP(sel) - 0d. */
5920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5921{
5922 NOREF(iSegReg); NOREF(fAccess);
5923 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5924 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5925}
5926
5927#ifdef IEM_WITH_SETJMP
5928/** \#GP(sel) - 0d, longjmp. */
5929DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5930{
5931 NOREF(iSegReg); NOREF(fAccess);
5932 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5933 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5934}
5935#endif
5936
5937/** \#GP(sel) - 0d. */
5938DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5939{
5940 NOREF(Sel);
5941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5942}
5943
5944#ifdef IEM_WITH_SETJMP
5945/** \#GP(sel) - 0d, longjmp. */
5946DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5947{
5948 NOREF(Sel);
5949 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5950}
5951#endif
5952
5953
5954/** \#GP(sel) - 0d. */
5955DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5956{
5957 NOREF(iSegReg); NOREF(fAccess);
5958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5959}
5960
5961#ifdef IEM_WITH_SETJMP
5962/** \#GP(sel) - 0d, longjmp. */
5963DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5964 uint32_t fAccess)
5965{
5966 NOREF(iSegReg); NOREF(fAccess);
5967 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5968}
5969#endif
5970
5971
5972/** \#PF(n) - 0e. */
5973DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5974{
5975 uint16_t uErr;
5976 switch (rc)
5977 {
5978 case VERR_PAGE_NOT_PRESENT:
5979 case VERR_PAGE_TABLE_NOT_PRESENT:
5980 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5981 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5982 uErr = 0;
5983 break;
5984
5985 default:
5986 AssertMsgFailed(("%Rrc\n", rc));
5987 RT_FALL_THRU();
5988 case VERR_ACCESS_DENIED:
5989 uErr = X86_TRAP_PF_P;
5990 break;
5991
5992 /** @todo reserved */
5993 }
5994
5995 if (pVCpu->iem.s.uCpl == 3)
5996 uErr |= X86_TRAP_PF_US;
5997
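/* The I/D (instruction fetch) bit is only set for code accesses when both CR4.PAE and EFER.NXE are enabled. */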
5998 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5999 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6000 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6001 uErr |= X86_TRAP_PF_ID;
6002
6003 #if 0 /* This is so much nonsense, really. Why was it done like that? */
6004 /* Note! RW access callers reporting a WRITE protection fault, will clear
6005 the READ flag before calling. So, read-modify-write accesses (RW)
6006 can safely be reported as READ faults. */
6007 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6008 uErr |= X86_TRAP_PF_RW;
6009#else
6010 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6011 {
6012 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6013 uErr |= X86_TRAP_PF_RW;
6014 }
6015#endif
6016
6017 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6018 uErr, GCPtrWhere);
6019}
6020
6021#ifdef IEM_WITH_SETJMP
6022/** \#PF(n) - 0e, longjmp. */
6023IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6024{
6025 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6026}
6027#endif
6028
6029
6030/** \#MF(0) - 10. */
6031DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6032{
6033 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6034}
6035
6036
6037/** \#AC(0) - 11. */
6038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6039{
6040 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6041}
6042
6043
6044/**
6045 * Macro for calling iemCImplRaiseDivideError().
6046 *
6047 * This enables us to add/remove arguments and force different levels of
6048 * inlining as we wish.
6049 *
6050 * @return Strict VBox status code.
6051 */
6052#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6053IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6054{
6055 NOREF(cbInstr);
6056 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6057}
6058
6059
6060/**
6061 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6062 *
6063 * This enables us to add/remove arguments and force different levels of
6064 * inlining as we wish.
6065 *
6066 * @return Strict VBox status code.
6067 */
6068#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6069IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6070{
6071 NOREF(cbInstr);
6072 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6073}
6074
6075
6076/**
6077 * Macro for calling iemCImplRaiseInvalidOpcode().
6078 *
6079 * This enables us to add/remove arguments and force different levels of
6080 * inlining as we wish.
6081 *
6082 * @return Strict VBox status code.
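 * @remarks Typically used straight from an opcode decoder; for example the
 *          FNIEMOP_UD_STUB() macros below simply do 'return IEMOP_RAISE_INVALID_OPCODE();'.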
6083 */
6084#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6085IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6086{
6087 NOREF(cbInstr);
6088 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6089}
6090
6091
6092/** @} */
6093
6094
6095/*
6096 *
6097 * Helper routines.
6098 * Helper routines.
6099 * Helper routines.
6100 *
6101 */
6102
6103/**
6104 * Recalculates the effective operand size.
6105 *
6106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6107 */
6108IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6109{
6110 switch (pVCpu->iem.s.enmCpuMode)
6111 {
6112 case IEMMODE_16BIT:
6113 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6114 break;
6115 case IEMMODE_32BIT:
6116 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6117 break;
6118 case IEMMODE_64BIT:
6119 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6120 {
6121 case 0:
6122 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6123 break;
6124 case IEM_OP_PRF_SIZE_OP:
6125 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6126 break;
6127 case IEM_OP_PRF_SIZE_REX_W:
6128 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6129 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6130 break;
6131 }
6132 break;
6133 default:
6134 AssertFailed();
6135 }
6136}
6137
6138
6139/**
6140 * Sets the default operand size to 64-bit and recalculates the effective
6141 * operand size.
6142 *
6143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6144 */
6145IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6146{
6147 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6148 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6149 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6150 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6151 else
6152 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6153}
6154
6155
6156/*
6157 *
6158 * Common opcode decoders.
6159 * Common opcode decoders.
6160 * Common opcode decoders.
6161 *
6162 */
6163//#include <iprt/mem.h>
6164
6165/**
6166 * Used to add extra details about a stub case.
6167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6168 */
6169IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6170{
6171#if defined(LOG_ENABLED) && defined(IN_RING3)
6172 PVM pVM = pVCpu->CTX_SUFF(pVM);
6173 char szRegs[4096];
6174 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6175 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6176 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6177 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6178 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6179 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6180 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6181 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6182 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6183 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6184 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6185 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6186 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6187 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6188 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6189 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6190 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6191 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6192 " efer=%016VR{efer}\n"
6193 " pat=%016VR{pat}\n"
6194 " sf_mask=%016VR{sf_mask}\n"
6195 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6196 " lstar=%016VR{lstar}\n"
6197 " star=%016VR{star} cstar=%016VR{cstar}\n"
6198 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6199 );
6200
6201 char szInstr[256];
6202 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6203 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6204 szInstr, sizeof(szInstr), NULL);
6205
6206 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6207#else
6208 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6209#endif
6210}
6211
6212/**
6213 * Complains about a stub.
6214 *
6215 * Two versions of this macro are provided: one for daily use and one for use
6216 * when working on IEM.
6217 */
6218#if 0
6219# define IEMOP_BITCH_ABOUT_STUB() \
6220 do { \
6221 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6222 iemOpStubMsg2(pVCpu); \
6223 RTAssertPanic(); \
6224 } while (0)
6225#else
6226# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6227#endif
6228
6229/** Stubs an opcode. */
6230#define FNIEMOP_STUB(a_Name) \
6231 FNIEMOP_DEF(a_Name) \
6232 { \
6233 RT_NOREF_PV(pVCpu); \
6234 IEMOP_BITCH_ABOUT_STUB(); \
6235 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6236 } \
6237 typedef int ignore_semicolon
6238
6239/** Stubs an opcode. */
6240#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6241 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6242 { \
6243 RT_NOREF_PV(pVCpu); \
6244 RT_NOREF_PV(a_Name0); \
6245 IEMOP_BITCH_ABOUT_STUB(); \
6246 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6247 } \
6248 typedef int ignore_semicolon
6249
6250/** Stubs an opcode which currently should raise \#UD. */
6251#define FNIEMOP_UD_STUB(a_Name) \
6252 FNIEMOP_DEF(a_Name) \
6253 { \
6254 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6255 return IEMOP_RAISE_INVALID_OPCODE(); \
6256 } \
6257 typedef int ignore_semicolon
6258
6259/** Stubs an opcode which currently should raise \#UD. */
6260#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6261 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6262 { \
6263 RT_NOREF_PV(pVCpu); \
6264 RT_NOREF_PV(a_Name0); \
6265 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6266 return IEMOP_RAISE_INVALID_OPCODE(); \
6267 } \
6268 typedef int ignore_semicolon
6269
6270
6271
6272/** @name Register Access.
6273 * @{
6274 */
6275
6276/**
6277 * Gets a reference (pointer) to the specified hidden segment register.
6278 *
6279 * @returns Hidden register reference.
6280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6281 * @param iSegReg The segment register.
6282 */
6283IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6284{
6285 Assert(iSegReg < X86_SREG_COUNT);
6286 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6287 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6288
6289#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6290 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6291 { /* likely */ }
6292 else
6293 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6294#else
6295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6296#endif
6297 return pSReg;
6298}
6299
6300
6301/**
6302 * Ensures that the given hidden segment register is up to date.
6303 *
6304 * @returns Hidden register reference.
6305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6306 * @param pSReg The segment register.
6307 */
6308IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6309{
6310#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6311 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6312 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6313#else
6314 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6315 NOREF(pVCpu);
6316#endif
6317 return pSReg;
6318}
6319
6320
6321/**
6322 * Gets a reference (pointer) to the specified segment register (the selector
6323 * value).
6324 *
6325 * @returns Pointer to the selector variable.
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param iSegReg The segment register.
6328 */
6329DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6330{
6331 Assert(iSegReg < X86_SREG_COUNT);
6332 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6333 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6334}
6335
6336
6337/**
6338 * Fetches the selector value of a segment register.
6339 *
6340 * @returns The selector value.
6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6342 * @param iSegReg The segment register.
6343 */
6344DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6345{
6346 Assert(iSegReg < X86_SREG_COUNT);
6347 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6348 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6349}
6350
6351
6352/**
6353 * Fetches the base address value of a segment register.
6354 *
6355 * @returns The segment base address.
6356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6357 * @param iSegReg The segment register.
6358 */
6359DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6360{
6361 Assert(iSegReg < X86_SREG_COUNT);
6362 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6363 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6364}
6365
6366
6367/**
6368 * Gets a reference (pointer) to the specified general purpose register.
6369 *
6370 * @returns Register reference.
6371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6372 * @param iReg The general purpose register.
6373 */
6374DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6375{
6376 Assert(iReg < 16);
6377 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6378}
6379
6380
6381/**
6382 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6383 *
6384 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6385 *
6386 * @returns Register reference.
6387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6388 * @param iReg The register.
6389 */
6390DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6391{
6392 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6393 {
6394 Assert(iReg < 16);
6395 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6396 }
6397 /* high 8-bit register. */
6398 Assert(iReg < 8);
6399 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6400}
6401
6402
6403/**
6404 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6405 *
6406 * @returns Register reference.
6407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6408 * @param iReg The register.
6409 */
6410DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6411{
6412 Assert(iReg < 16);
6413 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6414}
6415
6416
6417/**
6418 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6419 *
6420 * @returns Register reference.
6421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6422 * @param iReg The register.
6423 */
6424DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6425{
6426 Assert(iReg < 16);
6427 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6428}
6429
6430
6431/**
6432 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6433 *
6434 * @returns Register reference.
6435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6436 * @param iReg The register.
6437 */
6438DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6439{
6440 Assert(iReg < 16);
6441 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6442}
6443
6444
6445/**
6446 * Gets a reference (pointer) to the specified segment register's base address.
6447 *
6448 * @returns Segment register base address reference.
6449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6450 * @param iSegReg The segment selector.
6451 */
6452DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6453{
6454 Assert(iSegReg < X86_SREG_COUNT);
6455 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6456 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6457}
6458
6459
6460/**
6461 * Fetches the value of an 8-bit general purpose register.
6462 *
6463 * @returns The register value.
6464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6465 * @param iReg The register.
6466 */
6467DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6468{
6469 return *iemGRegRefU8(pVCpu, iReg);
6470}
6471
6472
6473/**
6474 * Fetches the value of a 16-bit general purpose register.
6475 *
6476 * @returns The register value.
6477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6478 * @param iReg The register.
6479 */
6480DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6481{
6482 Assert(iReg < 16);
6483 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6484}
6485
6486
6487/**
6488 * Fetches the value of a 32-bit general purpose register.
6489 *
6490 * @returns The register value.
6491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6492 * @param iReg The register.
6493 */
6494DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6495{
6496 Assert(iReg < 16);
6497 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6498}
6499
6500
6501/**
6502 * Fetches the value of a 64-bit general purpose register.
6503 *
6504 * @returns The register value.
6505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6506 * @param iReg The register.
6507 */
6508DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6509{
6510 Assert(iReg < 16);
6511 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6512}
6513
6514
6515/**
6516 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6517 *
6518 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6519 * segment limit.
6520 *
6521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6522 * @param offNextInstr The offset of the next instruction.
6523 */
6524IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6525{
6526 switch (pVCpu->iem.s.enmEffOpSize)
6527 {
6528 case IEMMODE_16BIT:
6529 {
6530 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6531 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6532 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6533 return iemRaiseGeneralProtectionFault0(pVCpu);
6534 pVCpu->cpum.GstCtx.rip = uNewIp;
6535 break;
6536 }
6537
6538 case IEMMODE_32BIT:
6539 {
6540 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6541 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6542
6543 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6544 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6545 return iemRaiseGeneralProtectionFault0(pVCpu);
6546 pVCpu->cpum.GstCtx.rip = uNewEip;
6547 break;
6548 }
6549
6550 case IEMMODE_64BIT:
6551 {
6552 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6553
6554 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6555 if (!IEM_IS_CANONICAL(uNewRip))
6556 return iemRaiseGeneralProtectionFault0(pVCpu);
6557 pVCpu->cpum.GstCtx.rip = uNewRip;
6558 break;
6559 }
6560
6561 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6562 }
6563
6564 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6565
6566#ifndef IEM_WITH_CODE_TLB
6567 /* Flush the prefetch buffer. */
6568 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6569#endif
6570
6571 return VINF_SUCCESS;
6572}
6573
6574
6575/**
6576 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6577 *
6578 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6579 * segment limit.
6580 *
6581 * @returns Strict VBox status code.
6582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6583 * @param offNextInstr The offset of the next instruction.
6584 */
6585IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6586{
6587 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6588
6589 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6590 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6591 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6592 return iemRaiseGeneralProtectionFault0(pVCpu);
6593 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6594 pVCpu->cpum.GstCtx.rip = uNewIp;
6595 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6596
6597#ifndef IEM_WITH_CODE_TLB
6598 /* Flush the prefetch buffer. */
6599 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6600#endif
6601
6602 return VINF_SUCCESS;
6603}
6604
6605
6606/**
6607 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6608 *
6609 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6610 * segment limit.
6611 *
6612 * @returns Strict VBox status code.
6613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6614 * @param offNextInstr The offset of the next instruction.
6615 */
6616IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6617{
6618 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6619
6620 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6621 {
6622 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6623
6624 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6625 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6626 return iemRaiseGeneralProtectionFault0(pVCpu);
6627 pVCpu->cpum.GstCtx.rip = uNewEip;
6628 }
6629 else
6630 {
6631 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6632
6633 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6634 if (!IEM_IS_CANONICAL(uNewRip))
6635 return iemRaiseGeneralProtectionFault0(pVCpu);
6636 pVCpu->cpum.GstCtx.rip = uNewRip;
6637 }
6638 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6639
6640#ifndef IEM_WITH_CODE_TLB
6641 /* Flush the prefetch buffer. */
6642 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6643#endif
6644
6645 return VINF_SUCCESS;
6646}
6647
6648
6649/**
6650 * Performs a near jump to the specified address.
6651 *
6652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6653 * segment limit.
6654 *
6655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6656 * @param uNewRip The new RIP value.
6657 */
6658IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6659{
6660 switch (pVCpu->iem.s.enmEffOpSize)
6661 {
6662 case IEMMODE_16BIT:
6663 {
6664 Assert(uNewRip <= UINT16_MAX);
6665 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6666 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6667 return iemRaiseGeneralProtectionFault0(pVCpu);
6668 /** @todo Test 16-bit jump in 64-bit mode. */
6669 pVCpu->cpum.GstCtx.rip = uNewRip;
6670 break;
6671 }
6672
6673 case IEMMODE_32BIT:
6674 {
6675 Assert(uNewRip <= UINT32_MAX);
6676 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6677 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6678
6679 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6680 return iemRaiseGeneralProtectionFault0(pVCpu);
6681 pVCpu->cpum.GstCtx.rip = uNewRip;
6682 break;
6683 }
6684
6685 case IEMMODE_64BIT:
6686 {
6687 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6688
6689 if (!IEM_IS_CANONICAL(uNewRip))
6690 return iemRaiseGeneralProtectionFault0(pVCpu);
6691 pVCpu->cpum.GstCtx.rip = uNewRip;
6692 break;
6693 }
6694
6695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6696 }
6697
6698 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6699
6700#ifndef IEM_WITH_CODE_TLB
6701 /* Flush the prefetch buffer. */
6702 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6703#endif
6704
6705 return VINF_SUCCESS;
6706}
6707
6708
6709/**
6710 * Get the address of the top of the stack.
6711 *
6712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6713 */
6714DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6715{
6716 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6717 return pVCpu->cpum.GstCtx.rsp;
6718 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6719 return pVCpu->cpum.GstCtx.esp;
6720 return pVCpu->cpum.GstCtx.sp;
6721}
6722
6723
6724/**
6725 * Updates the RIP/EIP/IP to point to the next instruction.
6726 *
6727 * This function leaves the EFLAGS.RF flag alone.
6728 *
6729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6730 * @param cbInstr The number of bytes to add.
6731 */
6732IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6733{
6734 switch (pVCpu->iem.s.enmCpuMode)
6735 {
6736 case IEMMODE_16BIT:
6737 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6738 pVCpu->cpum.GstCtx.eip += cbInstr;
6739 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6740 break;
6741
6742 case IEMMODE_32BIT:
6743 pVCpu->cpum.GstCtx.eip += cbInstr;
6744 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6745 break;
6746
6747 case IEMMODE_64BIT:
6748 pVCpu->cpum.GstCtx.rip += cbInstr;
6749 break;
6750 default: AssertFailed();
6751 }
6752}
6753
6754
6755#if 0
6756/**
6757 * Updates the RIP/EIP/IP to point to the next instruction.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 */
6761IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6762{
6763 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6764}
6765#endif
6766
6767
6768
6769/**
6770 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6771 *
6772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6773 * @param cbInstr The number of bytes to add.
6774 */
6775IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6776{
6777 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6778
6779 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6780#if ARCH_BITS >= 64
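    /* RIP wrap-around masks, indexed by IEMMODE: 16-bit and 32-bit code keep RIP within 32 bits, 64-bit code uses the full register. */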
6781 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6782 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6783 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6784#else
6785 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6786 pVCpu->cpum.GstCtx.rip += cbInstr;
6787 else
6788 pVCpu->cpum.GstCtx.eip += cbInstr;
6789#endif
6790}
6791
6792
6793/**
6794 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6795 *
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 */
6798IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6799{
6800 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6801}
6802
6803
6804/**
6805 * Adds to the stack pointer.
6806 *
6807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6808 * @param cbToAdd The number of bytes to add (8-bit!).
6809 */
6810DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6811{
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6814 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6815 pVCpu->cpum.GstCtx.esp += cbToAdd;
6816 else
6817 pVCpu->cpum.GstCtx.sp += cbToAdd;
6818}
6819
6820
6821/**
6822 * Subtracts from the stack pointer.
6823 *
6824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6825 * @param cbToSub The number of bytes to subtract (8-bit!).
6826 */
6827DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6828{
6829 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6830 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6831 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6832 pVCpu->cpum.GstCtx.esp -= cbToSub;
6833 else
6834 pVCpu->cpum.GstCtx.sp -= cbToSub;
6835}
6836
6837
6838/**
6839 * Adds to the temporary stack pointer.
6840 *
6841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6842 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6843 * @param cbToAdd The number of bytes to add (16-bit).
6844 */
6845DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6846{
6847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6848 pTmpRsp->u += cbToAdd;
6849 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6850 pTmpRsp->DWords.dw0 += cbToAdd;
6851 else
6852 pTmpRsp->Words.w0 += cbToAdd;
6853}
6854
6855
6856/**
6857 * Subtracts from the temporary stack pointer.
6858 *
6859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6860 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6861 * @param cbToSub The number of bytes to subtract.
6862 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6863 * expecting that.
6864 */
6865DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6866{
6867 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6868 pTmpRsp->u -= cbToSub;
6869 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6870 pTmpRsp->DWords.dw0 -= cbToSub;
6871 else
6872 pTmpRsp->Words.w0 -= cbToSub;
6873}
6874
6875
6876/**
6877 * Calculates the effective stack address for a push of the specified size as
6878 * well as the new RSP value (upper bits may be masked).
6879 *
6880 * @returns Effective stack address for the push.
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 * @param cbItem The size of the stack item to push.
6883 * @param puNewRsp Where to return the new RSP value.
6884 */
6885DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6886{
6887 RTUINT64U uTmpRsp;
6888 RTGCPTR GCPtrTop;
6889 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6890
6891 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6892 GCPtrTop = uTmpRsp.u -= cbItem;
6893 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6894 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6895 else
6896 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6897 *puNewRsp = uTmpRsp.u;
6898 return GCPtrTop;
6899}
6900
6901
6902/**
6903 * Gets the current stack pointer and calculates the value after a pop of the
6904 * specified size.
6905 *
6906 * @returns Current stack pointer.
6907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6908 * @param cbItem The size of the stack item to pop.
6909 * @param puNewRsp Where to return the new RSP value.
6910 */
6911DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6912{
6913 RTUINT64U uTmpRsp;
6914 RTGCPTR GCPtrTop;
6915 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6916
6917 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6918 {
6919 GCPtrTop = uTmpRsp.u;
6920 uTmpRsp.u += cbItem;
6921 }
6922 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6923 {
6924 GCPtrTop = uTmpRsp.DWords.dw0;
6925 uTmpRsp.DWords.dw0 += cbItem;
6926 }
6927 else
6928 {
6929 GCPtrTop = uTmpRsp.Words.w0;
6930 uTmpRsp.Words.w0 += cbItem;
6931 }
6932 *puNewRsp = uTmpRsp.u;
6933 return GCPtrTop;
6934}
6935
6936
6937/**
6938 * Calculates the effective stack address for a push of the specified size as
6939 * well as the new temporary RSP value (upper bits may be masked).
6940 *
6941 * @returns Effective stack address for the push.
6942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6943 * @param pTmpRsp The temporary stack pointer. This is updated.
6944 * @param cbItem The size of the stack item to push.
6945 */
6946DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6947{
6948 RTGCPTR GCPtrTop;
6949
6950 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6951 GCPtrTop = pTmpRsp->u -= cbItem;
6952 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6953 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6954 else
6955 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6956 return GCPtrTop;
6957}
6958
6959
6960/**
6961 * Gets the effective stack address for a pop of the specified size and
6962 * calculates and updates the temporary RSP.
6963 *
6964 * @returns Current stack pointer.
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 * @param pTmpRsp The temporary stack pointer. This is updated.
6967 * @param cbItem The size of the stack item to pop.
6968 */
6969DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6970{
6971 RTGCPTR GCPtrTop;
6972 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6973 {
6974 GCPtrTop = pTmpRsp->u;
6975 pTmpRsp->u += cbItem;
6976 }
6977 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6978 {
6979 GCPtrTop = pTmpRsp->DWords.dw0;
6980 pTmpRsp->DWords.dw0 += cbItem;
6981 }
6982 else
6983 {
6984 GCPtrTop = pTmpRsp->Words.w0;
6985 pTmpRsp->Words.w0 += cbItem;
6986 }
6987 return GCPtrTop;
6988}
6989
6990/** @} */
6991
6992
6993/** @name FPU access and helpers.
6994 *
6995 * @{
6996 */
6997
6998
6999/**
7000 * Hook for preparing to use the host FPU.
7001 *
7002 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7003 *
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 */
7006DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7007{
7008#ifdef IN_RING3
7009 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7010#else
7011 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7012#endif
7013 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7014}
7015
7016
7017/**
7018 * Hook for preparing to use the host FPU for SSE.
7019 *
7020 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7021 *
7022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7023 */
7024DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7025{
7026 iemFpuPrepareUsage(pVCpu);
7027}
7028
7029
7030/**
7031 * Hook for preparing to use the host FPU for AVX.
7032 *
7033 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7034 *
7035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7036 */
7037DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7038{
7039 iemFpuPrepareUsage(pVCpu);
7040}
7041
7042
7043/**
7044 * Hook for actualizing the guest FPU state before the interpreter reads it.
7045 *
7046 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7047 *
7048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7049 */
7050DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7051{
7052#ifdef IN_RING3
7053 NOREF(pVCpu);
7054#else
7055 CPUMRZFpuStateActualizeForRead(pVCpu);
7056#endif
7057 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7058}
7059
7060
7061/**
7062 * Hook for actualizing the guest FPU state before the interpreter changes it.
7063 *
7064 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 */
7068DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7069{
7070#ifdef IN_RING3
7071 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7072#else
7073 CPUMRZFpuStateActualizeForChange(pVCpu);
7074#endif
7075 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7076}
7077
7078
7079/**
7080 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7081 * only.
7082 *
7083 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7084 *
7085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7086 */
7087DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7088{
7089#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7090 NOREF(pVCpu);
7091#else
7092 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7093#endif
7094 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7095}
7096
7097
7098/**
7099 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7100 * read+write.
7101 *
7102 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7103 *
7104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7105 */
7106DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7107{
7108#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7109 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7110#else
7111 CPUMRZFpuStateActualizeForChange(pVCpu);
7112#endif
7113 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7114}
7115
7116
7117/**
7118 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7119 * only.
7120 *
7121 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7122 *
7123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7124 */
7125DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7126{
7127#ifdef IN_RING3
7128 NOREF(pVCpu);
7129#else
7130 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7131#endif
7132 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7133}
7134
7135
7136/**
7137 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7138 * read+write.
7139 *
7140 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7141 *
7142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7143 */
7144DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7145{
7146#ifdef IN_RING3
7147 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7148#else
7149 CPUMRZFpuStateActualizeForChange(pVCpu);
7150#endif
7151 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7152}
7153
7154
7155/**
7156 * Stores a QNaN value into a FPU register.
7157 *
7158 * @param pReg Pointer to the register.
7159 */
7160DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7161{
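    /* The real indefinite QNaN: sign=1, exponent all ones, integer bit and top fraction bit set. */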
7162 pReg->au32[0] = UINT32_C(0x00000000);
7163 pReg->au32[1] = UINT32_C(0xc0000000);
7164 pReg->au16[4] = UINT16_C(0xffff);
7165}
7166
7167
7168/**
7169 * Updates the FOP, FPU.CS and FPUIP registers.
7170 *
7171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7172 * @param pFpuCtx The FPU context.
7173 */
7174DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7175{
7176 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7177 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7178 /** @todo x87.CS and FPUIP need to be kept separately. */
7179 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7180 {
7181 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7182 * happens in real mode here based on the fnsave and fnstenv images. */
7183 pFpuCtx->CS = 0;
7184 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7185 }
7186 else
7187 {
7188 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7189 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7190 }
7191}
7192
7193
7194/**
7195 * Updates the x87.DS and FPUDP registers.
7196 *
7197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7198 * @param pFpuCtx The FPU context.
7199 * @param iEffSeg The effective segment register.
7200 * @param GCPtrEff The effective address relative to @a iEffSeg.
7201 */
7202DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7203{
7204 RTSEL sel;
7205 switch (iEffSeg)
7206 {
7207 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7208 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7209 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7210 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7211 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7212 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7213 default:
7214 AssertMsgFailed(("%d\n", iEffSeg));
7215 sel = pVCpu->cpum.GstCtx.ds.Sel;
7216 }
7217 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7218 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7219 {
7220 pFpuCtx->DS = 0;
7221 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7222 }
7223 else
7224 {
7225 pFpuCtx->DS = sel;
7226 pFpuCtx->FPUDP = GCPtrEff;
7227 }
7228}
7229
7230
7231/**
7232 * Rotates the stack registers in the push direction.
7233 *
7234 * @param pFpuCtx The FPU context.
7235 * @remarks This is a complete waste of time, but fxsave stores the registers in
7236 * stack order.
7237 */
7238DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7239{
7240 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7241 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7242 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7243 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7244 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7245 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7246 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7247 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7248 pFpuCtx->aRegs[0].r80 = r80Tmp;
7249}
7250
7251
7252/**
7253 * Rotates the stack registers in the pop direction.
7254 *
7255 * @param pFpuCtx The FPU context.
7256 * @remarks This is a complete waste of time, but fxsave stores the registers in
7257 * stack order.
7258 */
7259DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7260{
7261 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7262 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7263 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7264 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7265 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7266 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7267 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7268 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7269 pFpuCtx->aRegs[7].r80 = r80Tmp;
7270}
7271
7272
7273/**
7274 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7275 * exception prevents it.
7276 *
7277 * @param pResult The FPU operation result to push.
7278 * @param pFpuCtx The FPU context.
7279 */
7280IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7281{
7282 /* Update FSW and bail if there are pending exceptions afterwards. */
7283 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7284 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7285 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7286 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7287 {
7288 pFpuCtx->FSW = fFsw;
7289 return;
7290 }
7291
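    /* A push decrements TOP; adding 7 is -1 modulo 8. */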
7292 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7293 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7294 {
7295 /* All is fine, push the actual value. */
7296 pFpuCtx->FTW |= RT_BIT(iNewTop);
7297 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7298 }
7299 else if (pFpuCtx->FCW & X86_FCW_IM)
7300 {
7301 /* Masked stack overflow, push QNaN. */
7302 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7303 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7304 }
7305 else
7306 {
7307 /* Raise stack overflow, don't push anything. */
7308 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7309 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7310 return;
7311 }
7312
7313 fFsw &= ~X86_FSW_TOP_MASK;
7314 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7315 pFpuCtx->FSW = fFsw;
7316
7317 iemFpuRotateStackPush(pFpuCtx);
7318}
7319
7320
7321/**
7322 * Stores a result in a FPU register and updates the FSW and FTW.
7323 *
7324 * @param pFpuCtx The FPU context.
7325 * @param pResult The result to store.
7326 * @param iStReg Which FPU register to store it in.
7327 */
7328IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7329{
7330 Assert(iStReg < 8);
7331 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7332 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7333 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7334 pFpuCtx->FTW |= RT_BIT(iReg);
7335 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7336}
7337
7338
7339/**
7340 * Only updates the FPU status word (FSW) with the result of the current
7341 * instruction.
7342 *
7343 * @param pFpuCtx The FPU context.
7344 * @param u16FSW The FSW output of the current instruction.
7345 */
7346IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7347{
7348 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7349 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7350}
7351
7352
7353/**
7354 * Pops one item off the FPU stack if no pending exception prevents it.
7355 *
7356 * @param pFpuCtx The FPU context.
7357 */
7358IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7359{
7360 /* Check pending exceptions. */
7361 uint16_t uFSW = pFpuCtx->FSW;
7362 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7363 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7364 return;
7365
7366 /* TOP++ (pop one item); adding 9 is +1 modulo 8. */
7367 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7368 uFSW &= ~X86_FSW_TOP_MASK;
7369 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7370 pFpuCtx->FSW = uFSW;
7371
7372 /* Mark the previous ST0 as empty. */
7373 iOldTop >>= X86_FSW_TOP_SHIFT;
7374 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7375
7376 /* Rotate the registers. */
7377 iemFpuRotateStackPop(pFpuCtx);
7378}
7379
7380
7381/**
7382 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7383 *
7384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7385 * @param pResult The FPU operation result to push.
7386 */
7387IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7388{
7389 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7390 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7391 iemFpuMaybePushResult(pResult, pFpuCtx);
7392}
7393
7394
7395/**
7396 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7397 * and sets FPUDP and FPUDS.
7398 *
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 * @param pResult The FPU operation result to push.
7401 * @param iEffSeg The effective segment register.
7402 * @param GCPtrEff The effective address relative to @a iEffSeg.
7403 */
7404IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7405{
7406 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7407 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7409 iemFpuMaybePushResult(pResult, pFpuCtx);
7410}
7411
7412
7413/**
7414 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7415 * unless a pending exception prevents it.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param pResult The FPU operation result to store and push.
7419 */
7420IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7421{
7422 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7423 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7424
7425 /* Update FSW and bail if there are pending exceptions afterwards. */
7426 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7427 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7428 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7429 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7430 {
7431 pFpuCtx->FSW = fFsw;
7432 return;
7433 }
7434
7435 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7436 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7437 {
7438 /* All is fine, push the actual value. */
7439 pFpuCtx->FTW |= RT_BIT(iNewTop);
7440 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7441 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7442 }
7443 else if (pFpuCtx->FCW & X86_FCW_IM)
7444 {
7445 /* Masked stack overflow, push QNaN. */
7446 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7447 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7448 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7449 }
7450 else
7451 {
7452 /* Raise stack overflow, don't push anything. */
7453 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7454 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7455 return;
7456 }
7457
7458 fFsw &= ~X86_FSW_TOP_MASK;
7459 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7460 pFpuCtx->FSW = fFsw;
7461
7462 iemFpuRotateStackPush(pFpuCtx);
7463}
7464
7465
7466/**
7467 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7468 * FOP.
7469 *
7470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7471 * @param pResult The result to store.
7472 * @param iStReg Which FPU register to store it in.
7473 */
7474IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7475{
7476 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7477 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7478 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7479}
7480
7481
7482/**
7483 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7484 * FOP, and then pops the stack.
7485 *
7486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7487 * @param pResult The result to store.
7488 * @param iStReg Which FPU register to store it in.
7489 */
7490IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7491{
7492 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7493 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7494 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7495 iemFpuMaybePopOne(pFpuCtx);
7496}
7497
7498
7499/**
7500 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7501 * FPUDP, and FPUDS.
7502 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7504 * @param pResult The result to store.
7505 * @param iStReg Which FPU register to store it in.
7506 * @param iEffSeg The effective memory operand selector register.
7507 * @param GCPtrEff The effective memory operand offset.
7508 */
7509IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7510 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7511{
7512 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7513 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7514 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7515 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7516}
7517
7518
7519/**
7520 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7521 * FPUDP, and FPUDS, and then pops the stack.
7522 *
7523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7524 * @param pResult The result to store.
7525 * @param iStReg Which FPU register to store it in.
7526 * @param iEffSeg The effective memory operand selector register.
7527 * @param GCPtrEff The effective memory operand offset.
7528 */
7529IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7530 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7531{
7532 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7533 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7534 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7535 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7536 iemFpuMaybePopOne(pFpuCtx);
7537}
7538
7539
7540/**
7541 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7542 *
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 */
7545IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7546{
7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7548 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7549}
7550
7551
7552/**
7553 * Marks the specified stack register as free (for FFREE).
7554 *
7555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7556 * @param iStReg The register to free.
7557 */
7558IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7559{
7560 Assert(iStReg < 8);
7561 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7562 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7563 pFpuCtx->FTW &= ~RT_BIT(iReg);
7564}
7565
7566
7567/**
7568 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7569 *
7570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7571 */
7572IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7573{
7574 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7575 uint16_t uFsw = pFpuCtx->FSW;
7576 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7577 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7578 uFsw &= ~X86_FSW_TOP_MASK;
7579 uFsw |= uTop;
7580 pFpuCtx->FSW = uFsw;
7581}
7582
7583
7584/**
7585 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7586 *
7587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7588 */
7589IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7590{
7591 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7592 uint16_t uFsw = pFpuCtx->FSW;
7593 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7594 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7595 uFsw &= ~X86_FSW_TOP_MASK;
7596 uFsw |= uTop;
7597 pFpuCtx->FSW = uFsw;
7598}
7599
7600
7601/**
7602 * Updates the FSW, FOP, FPUIP, and FPUCS.
7603 *
7604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7605 * @param u16FSW The FSW from the current instruction.
7606 */
7607IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7608{
7609 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7610 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7611 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7612}
7613
7614
7615/**
7616 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7617 *
7618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7619 * @param u16FSW The FSW from the current instruction.
7620 */
7621IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7622{
7623 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7625 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7626 iemFpuMaybePopOne(pFpuCtx);
7627}
7628
7629
7630/**
7631 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7632 *
7633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7634 * @param u16FSW The FSW from the current instruction.
7635 * @param iEffSeg The effective memory operand selector register.
7636 * @param GCPtrEff The effective memory operand offset.
7637 */
7638IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7639{
7640 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7641 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7642 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7643 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7644}
7645
7646
7647/**
7648 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7649 *
7650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7651 * @param u16FSW The FSW from the current instruction.
7652 */
7653IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7654{
7655 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7656 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7657 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7658 iemFpuMaybePopOne(pFpuCtx);
7659 iemFpuMaybePopOne(pFpuCtx);
7660}
7661
7662
7663/**
7664 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7665 *
7666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7667 * @param u16FSW The FSW from the current instruction.
7668 * @param iEffSeg The effective memory operand selector register.
7669 * @param GCPtrEff The effective memory operand offset.
7670 */
7671IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7672{
7673 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7674 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7675 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7676 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7677 iemFpuMaybePopOne(pFpuCtx);
7678}
7679
7680
7681/**
7682 * Worker routine for raising an FPU stack underflow exception.
7683 *
7684 * @param pFpuCtx The FPU context.
7685 * @param iStReg The stack register being accessed.
7686 */
7687IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7688{
7689 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7690 if (pFpuCtx->FCW & X86_FCW_IM)
7691 {
7692 /* Masked underflow. */
7693 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7694 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7695 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7696 if (iStReg != UINT8_MAX)
7697 {
7698 pFpuCtx->FTW |= RT_BIT(iReg);
7699 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7700 }
7701 }
7702 else
7703 {
7704 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7705 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7706 }
7707}
7708
7709
7710/**
7711 * Raises a FPU stack underflow exception.
7712 *
7713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7714 * @param iStReg The destination register that should be loaded
7715 * with QNaN if \#IS is not masked. Specify
7716 * UINT8_MAX if none (like for fcom).
7717 */
7718DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7719{
7720 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7721 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7722 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7723}
7724
7725
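/**
 * Raises a FPU stack underflow exception, recording the memory operand in
 * FPUDP and FPUDS.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iStReg      The destination register that should be loaded with QNaN
 *                      if \#IS is not masked.  Specify UINT8_MAX if none.
 * @param   iEffSeg     The effective memory operand selector register.
 * @param   GCPtrEff    The effective memory operand offset.
 */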
7726DECL_NO_INLINE(IEM_STATIC, void)
7727iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7728{
7729 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7730 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7731 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7732 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7733}
7734
7735
7736DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7737{
7738 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7739 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7740 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7741 iemFpuMaybePopOne(pFpuCtx);
7742}
7743
7744
7745DECL_NO_INLINE(IEM_STATIC, void)
7746iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7747{
7748 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7749 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7750 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7751 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7752 iemFpuMaybePopOne(pFpuCtx);
7753}
7754
7755
7756DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7757{
7758 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7759 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7760 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7761 iemFpuMaybePopOne(pFpuCtx);
7762 iemFpuMaybePopOne(pFpuCtx);
7763}
7764
7765
7766DECL_NO_INLINE(IEM_STATIC, void)
7767iemFpuStackPushUnderflow(PVMCPU pVCpu)
7768{
7769 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7770 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7771
7772 if (pFpuCtx->FCW & X86_FCW_IM)
7773 {
7774 /* Masked underflow - Push QNaN. */
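 /* (TOP + 7) & 7 is TOP - 1 modulo 8, i.e. the stack slot the push will occupy. */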
7775 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7776 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7777 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7778 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7779 pFpuCtx->FTW |= RT_BIT(iNewTop);
7780 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7781 iemFpuRotateStackPush(pFpuCtx);
7782 }
7783 else
7784 {
7785 /* Exception pending - don't change TOP or the register stack. */
7786 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7787 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7788 }
7789}
7790
7791
7792DECL_NO_INLINE(IEM_STATIC, void)
7793iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7794{
7795 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7796 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7797
7798 if (pFpuCtx->FCW & X86_FCW_IM)
7799 {
7800 /* Masked underflow - Push QNaN. */
7801 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7802 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7803 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7804 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7805 pFpuCtx->FTW |= RT_BIT(iNewTop);
7806 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7807 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7808 iemFpuRotateStackPush(pFpuCtx);
7809 }
7810 else
7811 {
7812 /* Exception pending - don't change TOP or the register stack. */
7813 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7814 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7815 }
7816}
7817
7818
7819/**
7820 * Worker routine for raising an FPU stack overflow exception on a push.
7821 *
7822 * @param pFpuCtx The FPU context.
7823 */
7824IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7825{
7826 if (pFpuCtx->FCW & X86_FCW_IM)
7827 {
7828 /* Masked overflow. */
7829 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7830 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7831 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7832 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7833 pFpuCtx->FTW |= RT_BIT(iNewTop);
7834 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7835 iemFpuRotateStackPush(pFpuCtx);
7836 }
7837 else
7838 {
7839 /* Exception pending - don't change TOP or the register stack. */
7840 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7841 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7842 }
7843}
7844
7845
7846/**
7847 * Raises an FPU stack overflow exception on a push.
7848 *
7849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7850 */
7851DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7852{
7853 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7854 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7855 iemFpuStackPushOverflowOnly(pFpuCtx);
7856}
7857
7858
7859/**
7860 * Raises an FPU stack overflow exception on a push with a memory operand.
7861 *
7862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7863 * @param iEffSeg The effective memory operand selector register.
7864 * @param GCPtrEff The effective memory operand offset.
7865 */
7866DECL_NO_INLINE(IEM_STATIC, void)
7867iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7868{
7869 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7870 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7871 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7872 iemFpuStackPushOverflowOnly(pFpuCtx);
7873}
7874
7875
7876IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7877{
7878 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7879 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7880 if (pFpuCtx->FTW & RT_BIT(iReg))
7881 return VINF_SUCCESS;
7882 return VERR_NOT_FOUND;
7883}
7884
7885
7886IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7887{
7888 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7889 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7890 if (pFpuCtx->FTW & RT_BIT(iReg))
7891 {
7892 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7893 return VINF_SUCCESS;
7894 }
7895 return VERR_NOT_FOUND;
7896}
7897
7898
7899IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7900 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7901{
7902 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7903 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7904 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7905 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7906 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7907 {
7908 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7909 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7910 return VINF_SUCCESS;
7911 }
7912 return VERR_NOT_FOUND;
7913}
7914
7915
7916IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7917{
7918 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7919 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7920 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7921 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7922 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7923 {
7924 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7925 return VINF_SUCCESS;
7926 }
7927 return VERR_NOT_FOUND;
7928}
7929
7930
7931/**
7932 * Updates the FPU exception status after FCW is changed.
7933 *
7934 * @param pFpuCtx The FPU context.
7935 */
7936IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7937{
7938 uint16_t u16Fsw = pFpuCtx->FSW;
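 /* ES (error summary) and B (busy; mirrors ES on anything recent) are set whenever an
    exception bit is pending that is not masked by the corresponding FCW mask bit. */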
7939 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7940 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7941 else
7942 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7943 pFpuCtx->FSW = u16Fsw;
7944}
7945
7946
7947/**
7948 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7949 *
7950 * @returns The full FTW.
7951 * @param pFpuCtx The FPU context.
7952 */
7953IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7954{
7955 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7956 uint16_t u16Ftw = 0;
7957 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
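 /* The internal FTW is the abbreviated FXSAVE form with one 'in use' bit per register.
    Expand it to the FSTENV/FNSAVE form with two bits per register:
    00=valid, 01=zero, 10=special (NaN, infinity, denormal, unnormal), 11=empty. */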
7958 for (unsigned iSt = 0; iSt < 8; iSt++)
7959 {
7960 unsigned const iReg = (iSt + iTop) & 7;
7961 if (!(u8Ftw & RT_BIT(iReg)))
7962 u16Ftw |= 3 << (iReg * 2); /* empty */
7963 else
7964 {
7965 uint16_t uTag;
7966 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7967 if (pr80Reg->s.uExponent == 0x7fff)
7968 uTag = 2; /* Exponent is all 1's => Special. */
7969 else if (pr80Reg->s.uExponent == 0x0000)
7970 {
7971 if (pr80Reg->s.u64Mantissa == 0x0000)
7972 uTag = 1; /* All bits are zero => Zero. */
7973 else
7974 uTag = 2; /* Must be special. */
7975 }
7976 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7977 uTag = 0; /* Valid. */
7978 else
7979 uTag = 2; /* Must be special. */
7980
7981 u16Ftw |= uTag << (iReg * 2);
7982 }
7983 }
7984
7985 return u16Ftw;
7986}
7987
7988
7989/**
7990 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7991 *
7992 * @returns The compressed FTW.
7993 * @param u16FullFtw The full FTW to convert.
7994 */
7995IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7996{
7997 uint8_t u8Ftw = 0;
7998 for (unsigned i = 0; i < 8; i++)
7999 {
8000 if ((u16FullFtw & 3) != 3 /*empty*/)
8001 u8Ftw |= RT_BIT(i);
8002 u16FullFtw >>= 2;
8003 }
8004
8005 return u8Ftw;
8006}
8007
8008/** @} */
8009
8010
8011/** @name Memory access.
8012 *
8013 * @{
8014 */
8015
8016
8017/**
8018 * Updates the IEMCPU::cbWritten counter if applicable.
8019 *
8020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8021 * @param fAccess The access being accounted for.
8022 * @param cbMem The access size.
8023 */
8024DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8025{
8026 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8027 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8028 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8029}
8030
8031
8032/**
8033 * Checks if the given segment can be written to, raising the appropriate
8034 * exception if not.
8035 *
8036 * @returns VBox strict status code.
8037 *
8038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8039 * @param pHid Pointer to the hidden register.
8040 * @param iSegReg The register number.
8041 * @param pu64BaseAddr Where to return the base address to use for the
8042 * segment. (In 64-bit code it may differ from the
8043 * base in the hidden segment.)
8044 */
8045IEM_STATIC VBOXSTRICTRC
8046iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8047{
8048 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8049
8050 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8051 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8052 else
8053 {
8054 if (!pHid->Attr.n.u1Present)
8055 {
8056 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8057 AssertRelease(uSel == 0);
8058 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8059 return iemRaiseGeneralProtectionFault0(pVCpu);
8060 }
8061
8062 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8063 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8064 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8065 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8066 *pu64BaseAddr = pHid->u64Base;
8067 }
8068 return VINF_SUCCESS;
8069}
8070
8071
8072/**
8073 * Checks if the given segment can be read from, raising the appropriate
8074 * exception if not.
8075 *
8076 * @returns VBox strict status code.
8077 *
8078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8079 * @param pHid Pointer to the hidden register.
8080 * @param iSegReg The register number.
8081 * @param pu64BaseAddr Where to return the base address to use for the
8082 * segment. (In 64-bit code it may differ from the
8083 * base in the hidden segment.)
8084 */
8085IEM_STATIC VBOXSTRICTRC
8086iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8087{
8088 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8089
8090 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8091 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8092 else
8093 {
8094 if (!pHid->Attr.n.u1Present)
8095 {
8096 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8097 AssertRelease(uSel == 0);
8098 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8099 return iemRaiseGeneralProtectionFault0(pVCpu);
8100 }
8101
8102 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8103 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8104 *pu64BaseAddr = pHid->u64Base;
8105 }
8106 return VINF_SUCCESS;
8107}
8108
8109
8110/**
8111 * Applies the segment limit, base and attributes.
8112 *
8113 * This may raise a \#GP or \#SS.
8114 *
8115 * @returns VBox strict status code.
8116 *
8117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8118 * @param fAccess The kind of access which is being performed.
8119 * @param iSegReg The index of the segment register to apply.
8120 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8121 * TSS, ++).
8122 * @param cbMem The access size.
8123 * @param pGCPtrMem Pointer to the guest memory address to apply
8124 * segmentation to. Input and output parameter.
8125 */
8126IEM_STATIC VBOXSTRICTRC
8127iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8128{
8129 if (iSegReg == UINT8_MAX)
8130 return VINF_SUCCESS;
8131
8132 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8133 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8134 switch (pVCpu->iem.s.enmCpuMode)
8135 {
8136 case IEMMODE_16BIT:
8137 case IEMMODE_32BIT:
8138 {
8139 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8140 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8141
8142 if ( pSel->Attr.n.u1Present
8143 && !pSel->Attr.n.u1Unusable)
8144 {
8145 Assert(pSel->Attr.n.u1DescType);
8146 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8147 {
8148 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8149 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8150 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8151
8152 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8153 {
8154 /** @todo CPL check. */
8155 }
8156
8157 /*
8158 * There are two kinds of data selectors, normal and expand down.
8159 */
8160 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8161 {
8162 if ( GCPtrFirst32 > pSel->u32Limit
8163 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8164 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8165 }
8166 else
8167 {
8168 /*
8169 * The upper boundary is defined by the B bit, not the G bit!
8170 */
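 /* For expand-down segments the valid offsets lie above the limit, up to
    0xffffffff (B=1) or 0xffff (B=0). */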
8171 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8172 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8173 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8174 }
8175 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8176 }
8177 else
8178 {
8179
8180 /*
8181 * A code selector can usually be used to read through; writing is
8182 * only permitted in real and V8086 mode.
8183 */
8184 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8185 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8186 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8187 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8188 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8189
8190 if ( GCPtrFirst32 > pSel->u32Limit
8191 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8192 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8193
8194 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8195 {
8196 /** @todo CPL check. */
8197 }
8198
8199 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8200 }
8201 }
8202 else
8203 return iemRaiseGeneralProtectionFault0(pVCpu);
8204 return VINF_SUCCESS;
8205 }
8206
8207 case IEMMODE_64BIT:
8208 {
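 /* In 64-bit mode only FS and GS contribute a base; limits are not checked,
    only that the address range is canonical. */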
8209 RTGCPTR GCPtrMem = *pGCPtrMem;
8210 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8211 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8212
8213 Assert(cbMem >= 1);
8214 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8215 return VINF_SUCCESS;
8216 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8217 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8218 return iemRaiseGeneralProtectionFault0(pVCpu);
8219 }
8220
8221 default:
8222 AssertFailedReturn(VERR_IEM_IPE_7);
8223 }
8224}
8225
8226
8227/**
8228 * Translates a virtual address to a physical physical address and checks if we
8229 * can access the page as specified.
8230 *
8231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8232 * @param GCPtrMem The virtual address.
8233 * @param fAccess The intended access.
8234 * @param pGCPhysMem Where to return the physical address.
8235 */
8236IEM_STATIC VBOXSTRICTRC
8237iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8238{
8239 /** @todo Need a different PGM interface here. We're currently using
8240 * generic / REM interfaces. this won't cut it for R0 & RC. */
8241 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8242 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8243 RTGCPHYS GCPhys;
8244 uint64_t fFlags;
8245 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8246 if (RT_FAILURE(rc))
8247 {
8248 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8249 /** @todo Check unassigned memory in unpaged mode. */
8250 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8251 *pGCPhysMem = NIL_RTGCPHYS;
8252 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8253 }
8254
8255 /* If the page is writable, user accessible and doesn't have the no-exec bit set,
8256 all access is allowed. Otherwise we'll have to check more carefully... */
8257 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8258 {
8259 /* Write to read only memory? */
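 /* CPL 3 data writes always fault on read-only pages; supervisor and implicit
    system accesses only fault when CR0.WP is set. */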
8260 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8261 && !(fFlags & X86_PTE_RW)
8262 && ( (pVCpu->iem.s.uCpl == 3
8263 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8264 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8265 {
8266 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8267 *pGCPhysMem = NIL_RTGCPHYS;
8268 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8269 }
8270
8271 /* Kernel memory accessed by userland? */
8272 if ( !(fFlags & X86_PTE_US)
8273 && pVCpu->iem.s.uCpl == 3
8274 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8275 {
8276 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8277 *pGCPhysMem = NIL_RTGCPHYS;
8278 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8279 }
8280
8281 /* Executing non-executable memory? */
8282 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8283 && (fFlags & X86_PTE_PAE_NX)
8284 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8285 {
8286 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8287 *pGCPhysMem = NIL_RTGCPHYS;
8288 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8289 VERR_ACCESS_DENIED);
8290 }
8291 }
8292
8293 /*
8294 * Set the dirty / access flags.
8295 * ASSUMES this is set when the address is translated rather than on commit...
8296 */
8297 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8298 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8299 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8300 {
8301 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8302 AssertRC(rc2);
8303 }
8304
8305 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8306 *pGCPhysMem = GCPhys;
8307 return VINF_SUCCESS;
8308}
8309
8310
8311
8312/**
8313 * Maps a physical page.
8314 *
8315 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8317 * @param GCPhysMem The physical address.
8318 * @param fAccess The intended access.
8319 * @param ppvMem Where to return the mapping address.
8320 * @param pLock The PGM lock.
8321 */
8322IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8323{
8324#ifdef IEM_LOG_MEMORY_WRITES
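/* When logging memory writes, fail the direct mapping of writable pages so all
   writes take the bounce buffer path and get logged on commit. */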
8325 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8326 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8327#endif
8328
8329 /** @todo This API may require some improving later. A private deal with PGM
8330 * regarding locking and unlocking needs to be struck. A couple of TLBs
8331 * living in PGM, but with publicly accessible inlined access methods
8332 * could perhaps be an even better solution. */
8333 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8334 GCPhysMem,
8335 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8336 pVCpu->iem.s.fBypassHandlers,
8337 ppvMem,
8338 pLock);
8339 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8340 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8341
8342 return rc;
8343}
8344
8345
8346/**
8347 * Unmap a page previously mapped by iemMemPageMap.
8348 *
8349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8350 * @param GCPhysMem The physical address.
8351 * @param fAccess The intended access.
8352 * @param pvMem What iemMemPageMap returned.
8353 * @param pLock The PGM lock.
8354 */
8355DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8356{
8357 NOREF(pVCpu);
8358 NOREF(GCPhysMem);
8359 NOREF(fAccess);
8360 NOREF(pvMem);
8361 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8362}
8363
8364
8365/**
8366 * Looks up a memory mapping entry.
8367 *
8368 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8370 * @param pvMem The memory address.
8371 * @param fAccess The kind of access.
8372 */
8373DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8374{
8375 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8376 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8377 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8378 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8379 return 0;
8380 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8381 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8382 return 1;
8383 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8384 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8385 return 2;
8386 return VERR_NOT_FOUND;
8387}
8388
8389
8390/**
8391 * Finds a free memmap entry when using iNextMapping doesn't work.
8392 *
8393 * @returns Memory mapping index, 1024 on failure.
8394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8395 */
8396IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8397{
8398 /*
8399 * The easy case.
8400 */
8401 if (pVCpu->iem.s.cActiveMappings == 0)
8402 {
8403 pVCpu->iem.s.iNextMapping = 1;
8404 return 0;
8405 }
8406
8407 /* There should be enough mappings for all instructions. */
8408 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8409
8410 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8411 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8412 return i;
8413
8414 AssertFailedReturn(1024);
8415}
8416
8417
8418/**
8419 * Commits a bounce buffer that needs writing back and unmaps it.
8420 *
8421 * @returns Strict VBox status code.
8422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8423 * @param iMemMap The index of the buffer to commit.
8424 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8425 * Always false in ring-3, obviously.
8426 */
8427IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8428{
8429 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8430 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8431#ifdef IN_RING3
8432 Assert(!fPostponeFail);
8433 RT_NOREF_PV(fPostponeFail);
8434#endif
8435
8436 /*
8437 * Do the writing.
8438 */
8439 PVM pVM = pVCpu->CTX_SUFF(pVM);
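 /* Nothing is written back for unassigned (unbacked) ranges; those were only
    bounce buffered to let the instruction complete. */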
8440 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8441 {
8442 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8443 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8444 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8445 if (!pVCpu->iem.s.fBypassHandlers)
8446 {
8447 /*
8448 * Carefully and efficiently dealing with access handler return
8449 * codes makes this a little bloated.
8450 */
8451 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8453 pbBuf,
8454 cbFirst,
8455 PGMACCESSORIGIN_IEM);
8456 if (rcStrict == VINF_SUCCESS)
8457 {
8458 if (cbSecond)
8459 {
8460 rcStrict = PGMPhysWrite(pVM,
8461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8462 pbBuf + cbFirst,
8463 cbSecond,
8464 PGMACCESSORIGIN_IEM);
8465 if (rcStrict == VINF_SUCCESS)
8466 { /* nothing */ }
8467 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8468 {
8469 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8473 }
8474#ifndef IN_RING3
8475 else if (fPostponeFail)
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8480 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8481 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8482 return iemSetPassUpStatus(pVCpu, rcStrict);
8483 }
8484#endif
8485 else
8486 {
8487 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8490 return rcStrict;
8491 }
8492 }
8493 }
8494 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8495 {
8496 if (!cbSecond)
8497 {
8498 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8499 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8500 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8501 }
8502 else
8503 {
8504 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8506 pbBuf + cbFirst,
8507 cbSecond,
8508 PGMACCESSORIGIN_IEM);
8509 if (rcStrict2 == VINF_SUCCESS)
8510 {
8511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8514 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8515 }
8516 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8517 {
8518 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8519 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8520 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8521 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8522 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8523 }
8524#ifndef IN_RING3
8525 else if (fPostponeFail)
8526 {
8527 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8528 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8530 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8531 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8532 return iemSetPassUpStatus(pVCpu, rcStrict);
8533 }
8534#endif
8535 else
8536 {
8537 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8540 return rcStrict2;
8541 }
8542 }
8543 }
8544#ifndef IN_RING3
8545 else if (fPostponeFail)
8546 {
8547 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8549 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8550 if (!cbSecond)
8551 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8552 else
8553 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8554 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8555 return iemSetPassUpStatus(pVCpu, rcStrict);
8556 }
8557#endif
8558 else
8559 {
8560 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8561 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8563 return rcStrict;
8564 }
8565 }
8566 else
8567 {
8568 /*
8569 * No access handlers, much simpler.
8570 */
8571 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8572 if (RT_SUCCESS(rc))
8573 {
8574 if (cbSecond)
8575 {
8576 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8577 if (RT_SUCCESS(rc))
8578 { /* likely */ }
8579 else
8580 {
8581 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8582 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8584 return rc;
8585 }
8586 }
8587 }
8588 else
8589 {
8590 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8591 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8592 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8593 return rc;
8594 }
8595 }
8596 }
8597
8598#if defined(IEM_LOG_MEMORY_WRITES)
8599 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8600 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8601 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8602 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8603 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8604 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8605
8606 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8607 g_cbIemWrote = cbWrote;
8608 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8609#endif
8610
8611 /*
8612 * Free the mapping entry.
8613 */
8614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8615 Assert(pVCpu->iem.s.cActiveMappings != 0);
8616 pVCpu->iem.s.cActiveMappings--;
8617 return VINF_SUCCESS;
8618}
8619
8620
8621/**
8622 * iemMemMap worker that deals with a request crossing pages.
8623 */
8624IEM_STATIC VBOXSTRICTRC
8625iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8626{
8627 /*
8628 * Do the address translations.
8629 */
8630 RTGCPHYS GCPhysFirst;
8631 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8632 if (rcStrict != VINF_SUCCESS)
8633 return rcStrict;
8634
8635 RTGCPHYS GCPhysSecond;
8636 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8637 fAccess, &GCPhysSecond);
8638 if (rcStrict != VINF_SUCCESS)
8639 return rcStrict;
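 /* The second translation above used the page-aligned address of the access' last byte;
    mask off any offset bits so GCPhysSecond is the start of the second page. */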
8640 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8641
8642 PVM pVM = pVCpu->CTX_SUFF(pVM);
8643
8644 /*
8645 * Read in the current memory content if it's a read, execute or partial
8646 * write access.
8647 */
8648 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8649 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8650 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8651
8652 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8653 {
8654 if (!pVCpu->iem.s.fBypassHandlers)
8655 {
8656 /*
8657 * Must carefully deal with access handler status codes here,
8658 * makes the code a bit bloated.
8659 */
8660 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8661 if (rcStrict == VINF_SUCCESS)
8662 {
8663 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8664 if (rcStrict == VINF_SUCCESS)
8665 { /*likely */ }
8666 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8667 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8668 else
8669 {
8670 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8671 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8672 return rcStrict;
8673 }
8674 }
8675 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8676 {
8677 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8678 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8679 {
8680 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8681 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8682 }
8683 else
8684 {
8685 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8686 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8687 return rcStrict2;
8688 }
8689 }
8690 else
8691 {
8692 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8693 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8694 return rcStrict;
8695 }
8696 }
8697 else
8698 {
8699 /*
8700 * No informational status codes here, much more straightforward.
8701 */
8702 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8703 if (RT_SUCCESS(rc))
8704 {
8705 Assert(rc == VINF_SUCCESS);
8706 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8707 if (RT_SUCCESS(rc))
8708 Assert(rc == VINF_SUCCESS);
8709 else
8710 {
8711 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8712 return rc;
8713 }
8714 }
8715 else
8716 {
8717 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8718 return rc;
8719 }
8720 }
8721 }
8722#ifdef VBOX_STRICT
8723 else
8724 memset(pbBuf, 0xcc, cbMem);
8725 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8726 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8727#endif
8728
8729 /*
8730 * Commit the bounce buffer entry.
8731 */
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8734 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8737 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8738 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8739 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8740 pVCpu->iem.s.cActiveMappings++;
8741
8742 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8743 *ppvMem = pbBuf;
8744 return VINF_SUCCESS;
8745}
8746
8747
8748/**
8749 * iemMemMap worker that deals with iemMemPageMap failures.
8750 */
8751IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8752 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8753{
8754 /*
8755 * Filter out conditions we can handle and the ones which shouldn't happen.
8756 */
8757 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8758 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8759 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8760 {
8761 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8762 return rcMap;
8763 }
8764 pVCpu->iem.s.cPotentialExits++;
8765
8766 /*
8767 * Read in the current memory content if it's a read, execute or partial
8768 * write access.
8769 */
8770 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8771 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8772 {
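 /* Reads from unassigned memory return all ones (0xff). */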
8773 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8774 memset(pbBuf, 0xff, cbMem);
8775 else
8776 {
8777 int rc;
8778 if (!pVCpu->iem.s.fBypassHandlers)
8779 {
8780 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8781 if (rcStrict == VINF_SUCCESS)
8782 { /* nothing */ }
8783 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8784 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8785 else
8786 {
8787 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8788 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8789 return rcStrict;
8790 }
8791 }
8792 else
8793 {
8794 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8795 if (RT_SUCCESS(rc))
8796 { /* likely */ }
8797 else
8798 {
8799 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8800 GCPhysFirst, rc));
8801 return rc;
8802 }
8803 }
8804 }
8805 }
8806#ifdef VBOX_STRICT
8807 else
8808 memset(pbBuf, 0xcc, cbMem);
8811 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8812 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8813#endif
8814
8815 /*
8816 * Commit the bounce buffer entry.
8817 */
8818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8820 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8821 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8822 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8823 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8824 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8825 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8826 pVCpu->iem.s.cActiveMappings++;
8827
8828 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8829 *ppvMem = pbBuf;
8830 return VINF_SUCCESS;
8831}
8832
8833
8834
8835/**
8836 * Maps the specified guest memory for the given kind of access.
8837 *
8838 * This may be using bounce buffering of the memory if it's crossing a page
8839 * boundary or if there is an access handler installed for any of it. Because
8840 * of lock prefix guarantees, we're in for some extra clutter when this
8841 * happens.
8842 *
8843 * This may raise a \#GP, \#SS, \#PF or \#AC.
8844 *
8845 * @returns VBox strict status code.
8846 *
8847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8848 * @param ppvMem Where to return the pointer to the mapped
8849 * memory.
8850 * @param cbMem The number of bytes to map. This is usually 1,
8851 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8852 * string operations it can be up to a page.
8853 * @param iSegReg The index of the segment register to use for
8854 * this access. The base and limits are checked.
8855 * Use UINT8_MAX to indicate that no segmentation
8856 * is required (for IDT, GDT and LDT accesses).
8857 * @param GCPtrMem The address of the guest memory.
8858 * @param fAccess How the memory is being accessed. The
8859 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8860 * how to map the memory, while the
8861 * IEM_ACCESS_WHAT_XXX bit is used when raising
8862 * exceptions.
8863 */
8864IEM_STATIC VBOXSTRICTRC
8865iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8866{
8867 /*
8868 * Check the input and figure out which mapping entry to use.
8869 */
8870 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8871 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8872 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8873
8874 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8875 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8876 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8877 {
8878 iMemMap = iemMemMapFindFree(pVCpu);
8879 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8880 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8881 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8882 pVCpu->iem.s.aMemMappings[2].fAccess),
8883 VERR_IEM_IPE_9);
8884 }
8885
8886 /*
8887 * Map the memory, checking that we can actually access it. If something
8888 * slightly complicated happens, fall back on bounce buffering.
8889 */
8890 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8891 if (rcStrict != VINF_SUCCESS)
8892 return rcStrict;
8893
8894 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8895 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8896
8897 RTGCPHYS GCPhysFirst;
8898 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8899 if (rcStrict != VINF_SUCCESS)
8900 return rcStrict;
8901
8902 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8903 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8904 if (fAccess & IEM_ACCESS_TYPE_READ)
8905 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8906
8907 void *pvMem;
8908 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8909 if (rcStrict != VINF_SUCCESS)
8910 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8911
8912 /*
8913 * Fill in the mapping table entry.
8914 */
8915 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8917 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8918 pVCpu->iem.s.cActiveMappings++;
8919
8920 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8921 *ppvMem = pvMem;
8922
8923 return VINF_SUCCESS;
8924}
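/*
 * Note: the typical calling pattern, as used by the iemMemFetchDataUxx helpers
 * further down, is a minimal map/access/commit sequence with matching fAccess flags:
 *
 *     uint32_t const *pu32Src;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src),
 *                                       iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint32_t const uValue = *pu32Src;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *     }
 *
 * The same fAccess value must be passed to iemMemCommitAndUnmap so iemMapLookup
 * can find the mapping entry again.
 */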
8925
8926
8927/**
8928 * Commits the guest memory if bounce buffered and unmaps it.
8929 *
8930 * @returns Strict VBox status code.
8931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8932 * @param pvMem The mapping.
8933 * @param fAccess The kind of access.
8934 */
8935IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8936{
8937 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8938 AssertReturn(iMemMap >= 0, iMemMap);
8939
8940 /* If it's bounce buffered, we may need to write back the buffer. */
8941 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8942 {
8943 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8944 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8945 }
8946 /* Otherwise unlock it. */
8947 else
8948 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8949
8950 /* Free the entry. */
8951 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8952 Assert(pVCpu->iem.s.cActiveMappings != 0);
8953 pVCpu->iem.s.cActiveMappings--;
8954 return VINF_SUCCESS;
8955}
8956
8957#ifdef IEM_WITH_SETJMP
8958
8959/**
8960 * Maps the specified guest memory for the given kind of access, longjmp on
8961 * error.
8962 *
8963 * This may be using bounce buffering of the memory if it's crossing a page
8964 * boundary or if there is an access handler installed for any of it. Because
8965 * of lock prefix guarantees, we're in for some extra clutter when this
8966 * happens.
8967 *
8968 * This may raise a \#GP, \#SS, \#PF or \#AC.
8969 *
8970 * @returns Pointer to the mapped memory.
8971 *
8972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8973 * @param cbMem The number of bytes to map. This is usually 1,
8974 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8975 * string operations it can be up to a page.
8976 * @param iSegReg The index of the segment register to use for
8977 * this access. The base and limits are checked.
8978 * Use UINT8_MAX to indicate that no segmentation
8979 * is required (for IDT, GDT and LDT accesses).
8980 * @param GCPtrMem The address of the guest memory.
8981 * @param fAccess How the memory is being accessed. The
8982 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8983 * how to map the memory, while the
8984 * IEM_ACCESS_WHAT_XXX bit is used when raising
8985 * exceptions.
8986 */
8987IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8988{
8989 /*
8990 * Check the input and figure out which mapping entry to use.
8991 */
8992 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8993 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8994 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8995
8996 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8997 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8998 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8999 {
9000 iMemMap = iemMemMapFindFree(pVCpu);
9001 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9002 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9003 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9004 pVCpu->iem.s.aMemMappings[2].fAccess),
9005 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9006 }
9007
9008 /*
9009 * Map the memory, checking that we can actually access it. If something
9010 * slightly complicated happens, fall back on bounce buffering.
9011 */
9012 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9013 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9014 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9015
9016 /* Crossing a page boundary? */
9017 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9018 { /* No (likely). */ }
9019 else
9020 {
9021 void *pvMem;
9022 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9023 if (rcStrict == VINF_SUCCESS)
9024 return pvMem;
9025 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9026 }
9027
9028 RTGCPHYS GCPhysFirst;
9029 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9030 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9031 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9032
9033 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9034 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9035 if (fAccess & IEM_ACCESS_TYPE_READ)
9036 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9037
9038 void *pvMem;
9039 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9040 if (rcStrict == VINF_SUCCESS)
9041 { /* likely */ }
9042 else
9043 {
9044 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9045 if (rcStrict == VINF_SUCCESS)
9046 return pvMem;
9047 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9048 }
9049
9050 /*
9051 * Fill in the mapping table entry.
9052 */
9053 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9054 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9055 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9056 pVCpu->iem.s.cActiveMappings++;
9057
9058 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9059 return pvMem;
9060}
9061
9062
9063/**
9064 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9065 *
9066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9067 * @param pvMem The mapping.
9068 * @param fAccess The kind of access.
9069 */
9070IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9071{
9072 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9073 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9074
9075 /* If it's bounce buffered, we may need to write back the buffer. */
9076 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9077 {
9078 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9079 {
9080 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9081 if (rcStrict == VINF_SUCCESS)
9082 return;
9083 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9084 }
9085 }
9086 /* Otherwise unlock it. */
9087 else
9088 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9089
9090 /* Free the entry. */
9091 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9092 Assert(pVCpu->iem.s.cActiveMappings != 0);
9093 pVCpu->iem.s.cActiveMappings--;
9094}
9095
9096#endif /* IEM_WITH_SETJMP */
9097
9098#ifndef IN_RING3
9099/**
9100 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
9101 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9102 *
9103 * Allows the instruction to be completed and retired, while the IEM user will
9104 * return to ring-3 immediately afterwards and do the postponed writes there.
9105 *
9106 * @returns VBox status code (no strict statuses). Caller must check
9107 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9109 * @param pvMem The mapping.
9110 * @param fAccess The kind of access.
9111 */
9112IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9113{
9114 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9115 AssertReturn(iMemMap >= 0, iMemMap);
9116
9117 /* If it's bounce buffered, we may need to write back the buffer. */
9118 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9119 {
9120 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9121 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9122 }
9123 /* Otherwise unlock it. */
9124 else
9125 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9126
9127 /* Free the entry. */
9128 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9129 Assert(pVCpu->iem.s.cActiveMappings != 0);
9130 pVCpu->iem.s.cActiveMappings--;
9131 return VINF_SUCCESS;
9132}
9133#endif
9134
9135
9136/**
9137 * Rolls back mappings, releasing page locks and such.
9138 *
9139 * The caller shall only call this after checking cActiveMappings.
9140 *
9142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9143 */
9144IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9145{
9146 Assert(pVCpu->iem.s.cActiveMappings > 0);
9147
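 /* Walk all mapping entries and invalidate the active ones. Only real (non-bounce-buffered)
    mappings hold a PGM page lock that needs releasing; pending bounce buffer writes are
    simply discarded. */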
9148 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9149 while (iMemMap-- > 0)
9150 {
9151 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9152 if (fAccess != IEM_ACCESS_INVALID)
9153 {
9154 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9155 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9156 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9157 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9158 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9159 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9160 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9161 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9162 pVCpu->iem.s.cActiveMappings--;
9163 }
9164 }
9165}
9166
9167
9168/**
9169 * Fetches a data byte.
9170 *
9171 * @returns Strict VBox status code.
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param pu8Dst Where to return the byte.
9174 * @param iSegReg The index of the segment register to use for
9175 * this access. The base and limits are checked.
9176 * @param GCPtrMem The address of the guest memory.
9177 */
9178IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9179{
9180 /* The lazy approach for now... */
9181 uint8_t const *pu8Src;
9182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9183 if (rc == VINF_SUCCESS)
9184 {
9185 *pu8Dst = *pu8Src;
9186 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9187 }
9188 return rc;
9189}
9190
9191
9192#ifdef IEM_WITH_SETJMP
9193/**
9194 * Fetches a data byte, longjmp on error.
9195 *
9196 * @returns The byte.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param iSegReg The index of the segment register to use for
9199 * this access. The base and limits are checked.
9200 * @param GCPtrMem The address of the guest memory.
9201 */
9202DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9203{
9204 /* The lazy approach for now... */
9205 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9206 uint8_t const bRet = *pu8Src;
9207 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9208 return bRet;
9209}
9210#endif /* IEM_WITH_SETJMP */
9211
9212
9213/**
9214 * Fetches a data word.
9215 *
9216 * @returns Strict VBox status code.
9217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9218 * @param pu16Dst Where to return the word.
9219 * @param iSegReg The index of the segment register to use for
9220 * this access. The base and limits are checked.
9221 * @param GCPtrMem The address of the guest memory.
9222 */
9223IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9224{
9225 /* The lazy approach for now... */
9226 uint16_t const *pu16Src;
9227 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9228 if (rc == VINF_SUCCESS)
9229 {
9230 *pu16Dst = *pu16Src;
9231 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9232 }
9233 return rc;
9234}
9235
9236
9237#ifdef IEM_WITH_SETJMP
9238/**
9239 * Fetches a data word, longjmp on error.
9240 *
9241 * @returns The word.
9242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9243 * @param iSegReg The index of the segment register to use for
9244 * this access. The base and limits are checked.
9245 * @param GCPtrMem The address of the guest memory.
9246 */
9247DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9248{
9249 /* The lazy approach for now... */
9250 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9251 uint16_t const u16Ret = *pu16Src;
9252 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9253 return u16Ret;
9254}
9255#endif
9256
9257
9258/**
9259 * Fetches a data dword.
9260 *
9261 * @returns Strict VBox status code.
9262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9263 * @param pu32Dst Where to return the dword.
9264 * @param iSegReg The index of the segment register to use for
9265 * this access. The base and limits are checked.
9266 * @param GCPtrMem The address of the guest memory.
9267 */
9268IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9269{
9270 /* The lazy approach for now... */
9271 uint32_t const *pu32Src;
9272 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9273 if (rc == VINF_SUCCESS)
9274 {
9275 *pu32Dst = *pu32Src;
9276 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9277 }
9278 return rc;
9279}
9280
9281
9282#ifdef IEM_WITH_SETJMP
9283
9284IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9285{
9286 Assert(cbMem >= 1);
9287 Assert(iSegReg < X86_SREG_COUNT);
9288
9289 /*
9290 * 64-bit mode is simpler.
9291 */
9292 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9293 {
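        /* In long mode only the FS and GS bases apply; the other segment bases are treated as zero (flat). */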
9294 if (iSegReg >= X86_SREG_FS)
9295 {
9296 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9297 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9298 GCPtrMem += pSel->u64Base;
9299 }
9300
9301 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9302 return GCPtrMem;
9303 }
9304 /*
9305 * 16-bit and 32-bit segmentation.
9306 */
9307 else
9308 {
9309 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9310 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9311 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9312 == X86DESCATTR_P /* data, expand up */
9313 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9314 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9315 {
9316 /* expand up */
9317 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9318 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9319 && GCPtrLast32 > (uint32_t)GCPtrMem))
9320 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9321 }
9322 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9323 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9324 {
9325 /* expand down */
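            /* For expand-down data segments the valid offsets lie strictly above the
               limit and extend up to 0xffff (D/B=0) or 0xffffffff (D/B=1), hence the
               inverted limit check and the wrap-around test below. */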
9326 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9327 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9328 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9329 && GCPtrLast32 > (uint32_t)GCPtrMem))
9330 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9331 }
9332 else
9333 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9334 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9335 }
9336 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9337}
9338
9339
9340IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9341{
9342 Assert(cbMem >= 1);
9343 Assert(iSegReg < X86_SREG_COUNT);
9344
9345 /*
9346 * 64-bit mode is simpler.
9347 */
9348 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9349 {
9350 if (iSegReg >= X86_SREG_FS)
9351 {
9352 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9353 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9354 GCPtrMem += pSel->u64Base;
9355 }
9356
9357 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9358 return GCPtrMem;
9359 }
9360 /*
9361 * 16-bit and 32-bit segmentation.
9362 */
9363 else
9364 {
9365 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9366 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9367 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9368 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9369 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9370 {
9371 /* expand up */
9372 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9373 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9374 && GCPtrLast32 > (uint32_t)GCPtrMem))
9375 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9376 }
9377 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9378 {
9379 /* expand down */
9380 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9381 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9382 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9383 && GCPtrLast32 > (uint32_t)GCPtrMem))
9384 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9385 }
9386 else
9387 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9388 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9389 }
9390 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9391}
9392
9393
9394/**
9395 * Fetches a data dword, longjmp on error, fallback/safe version.
9396 *
9397 * @returns The dword
9398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9399 * @param iSegReg The index of the segment register to use for
9400 * this access. The base and limits are checked.
9401 * @param GCPtrMem The address of the guest memory.
9402 */
9403IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9404{
9405 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9406 uint32_t const u32Ret = *pu32Src;
9407 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9408 return u32Ret;
9409}
9410
9411
9412/**
9413 * Fetches a data dword, longjmp on error.
9414 *
9415 * @returns The dword
9416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9417 * @param iSegReg The index of the segment register to use for
9418 * this access. The base and limits are checked.
9419 * @param GCPtrMem The address of the guest memory.
9420 */
9421DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9422{
9423# ifdef IEM_WITH_DATA_TLB
9424 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9425 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9426 {
9427 /// @todo more later.
9428 }
9429
9430 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9431# else
9432 /* The lazy approach. */
9433 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9434 uint32_t const u32Ret = *pu32Src;
9435 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9436 return u32Ret;
9437# endif
9438}
9439#endif
9440
9441
9442#ifdef SOME_UNUSED_FUNCTION
9443/**
9444 * Fetches a data dword and sign extends it to a qword.
9445 *
9446 * @returns Strict VBox status code.
9447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9448 * @param pu64Dst Where to return the sign extended value.
9449 * @param iSegReg The index of the segment register to use for
9450 * this access. The base and limits are checked.
9451 * @param GCPtrMem The address of the guest memory.
9452 */
9453IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9454{
9455 /* The lazy approach for now... */
9456 int32_t const *pi32Src;
9457 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9458 if (rc == VINF_SUCCESS)
9459 {
9460 *pu64Dst = *pi32Src;
9461 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9462 }
9463#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9464 else
9465 *pu64Dst = 0;
9466#endif
9467 return rc;
9468}
9469#endif
9470
9471
9472/**
9473 * Fetches a data qword.
9474 *
9475 * @returns Strict VBox status code.
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param pu64Dst Where to return the qword.
9478 * @param iSegReg The index of the segment register to use for
9479 * this access. The base and limits are checked.
9480 * @param GCPtrMem The address of the guest memory.
9481 */
9482IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9483{
9484 /* The lazy approach for now... */
9485 uint64_t const *pu64Src;
9486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9487 if (rc == VINF_SUCCESS)
9488 {
9489 *pu64Dst = *pu64Src;
9490 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9491 }
9492 return rc;
9493}
9494
9495
9496#ifdef IEM_WITH_SETJMP
9497/**
9498 * Fetches a data qword, longjmp on error.
9499 *
9500 * @returns The qword.
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param iSegReg The index of the segment register to use for
9503 * this access. The base and limits are checked.
9504 * @param GCPtrMem The address of the guest memory.
9505 */
9506DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9507{
9508 /* The lazy approach for now... */
9509 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9510 uint64_t const u64Ret = *pu64Src;
9511 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9512 return u64Ret;
9513}
9514#endif
9515
9516
9517/**
9518 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9519 *
9520 * @returns Strict VBox status code.
9521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9522 * @param pu64Dst Where to return the qword.
9523 * @param iSegReg The index of the segment register to use for
9524 * this access. The base and limits are checked.
9525 * @param GCPtrMem The address of the guest memory.
9526 */
9527IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9528{
9529 /* The lazy approach for now... */
9530 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9531 if (RT_UNLIKELY(GCPtrMem & 15))
9532 return iemRaiseGeneralProtectionFault0(pVCpu);
9533
9534 uint64_t const *pu64Src;
9535 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9536 if (rc == VINF_SUCCESS)
9537 {
9538 *pu64Dst = *pu64Src;
9539 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9540 }
9541 return rc;
9542}
9543
9544
9545#ifdef IEM_WITH_SETJMP
9546/**
9547 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9548 *
9549 * @returns The qword.
9550 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9551 * @param iSegReg The index of the segment register to use for
9552 * this access. The base and limits are checked.
9553 * @param GCPtrMem The address of the guest memory.
9554 */
9555DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9556{
9557 /* The lazy approach for now... */
9558 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9559 if (RT_LIKELY(!(GCPtrMem & 15)))
9560 {
9561 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9562 uint64_t const u64Ret = *pu64Src;
9563 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9564 return u64Ret;
9565 }
9566
9567 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9568 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9569}
9570#endif
9571
9572
9573/**
9574 * Fetches a data tword.
9575 *
9576 * @returns Strict VBox status code.
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param pr80Dst Where to return the tword.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 */
9583IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9584{
9585 /* The lazy approach for now... */
9586 PCRTFLOAT80U pr80Src;
9587 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9588 if (rc == VINF_SUCCESS)
9589 {
9590 *pr80Dst = *pr80Src;
9591 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9592 }
9593 return rc;
9594}
9595
9596
9597#ifdef IEM_WITH_SETJMP
9598/**
9599 * Fetches a data tword, longjmp on error.
9600 *
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pr80Dst Where to return the tword.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9611 *pr80Dst = *pr80Src;
9612 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9613}
9614#endif
9615
9616
9617/**
9618 * Fetches a data dqword (double qword), generally SSE related.
9619 *
9620 * @returns Strict VBox status code.
9621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9622 * @param pu128Dst Where to return the dqword.
9623 * @param iSegReg The index of the segment register to use for
9624 * this access. The base and limits are checked.
9625 * @param GCPtrMem The address of the guest memory.
9626 */
9627IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9628{
9629 /* The lazy approach for now... */
9630 PCRTUINT128U pu128Src;
9631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9632 if (rc == VINF_SUCCESS)
9633 {
9634 pu128Dst->au64[0] = pu128Src->au64[0];
9635 pu128Dst->au64[1] = pu128Src->au64[1];
9636 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9637 }
9638 return rc;
9639}
9640
9641
9642#ifdef IEM_WITH_SETJMP
9643/**
9644 * Fetches a data dqword (double qword), generally SSE related.
9645 *
9646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9647 * @param pu128Dst Where to return the dqword.
9648 * @param iSegReg The index of the segment register to use for
9649 * this access. The base and limits are checked.
9650 * @param GCPtrMem The address of the guest memory.
9651 */
9652IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9653{
9654 /* The lazy approach for now... */
9655 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9656 pu128Dst->au64[0] = pu128Src->au64[0];
9657 pu128Dst->au64[1] = pu128Src->au64[1];
9658 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9659}
9660#endif
9661
9662
9663/**
9664 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9665 * related.
9666 *
9667 * Raises \#GP(0) if not aligned.
9668 *
9669 * @returns Strict VBox status code.
9670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9671 * @param pu128Dst Where to return the dqword.
9672 * @param iSegReg The index of the segment register to use for
9673 * this access. The base and limits are checked.
9674 * @param GCPtrMem The address of the guest memory.
9675 */
9676IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9677{
9678 /* The lazy approach for now... */
9679 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
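    /* A misaligned 16-byte SSE access normally raises #GP(0); when AMD's MXCSR.MM
       (misaligned exception mask) bit is set, such accesses are tolerated instead,
       so the alignment check is skipped in that case. */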
9680 if ( (GCPtrMem & 15)
9681 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9682 return iemRaiseGeneralProtectionFault0(pVCpu);
9683
9684 PCRTUINT128U pu128Src;
9685 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9686 if (rc == VINF_SUCCESS)
9687 {
9688 pu128Dst->au64[0] = pu128Src->au64[0];
9689 pu128Dst->au64[1] = pu128Src->au64[1];
9690 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9691 }
9692 return rc;
9693}
9694
9695
9696#ifdef IEM_WITH_SETJMP
9697/**
9698 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9699 * related, longjmp on error.
9700 *
9701 * Raises \#GP(0) if not aligned.
9702 *
9703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9704 * @param pu128Dst Where to return the dqword.
9705 * @param iSegReg The index of the segment register to use for
9706 * this access. The base and limits are checked.
9707 * @param GCPtrMem The address of the guest memory.
9708 */
9709DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9710{
9711 /* The lazy approach for now... */
9712 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9713 if ( (GCPtrMem & 15) == 0
9714 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9715 {
9716 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9717 pu128Dst->au64[0] = pu128Src->au64[0];
9718 pu128Dst->au64[1] = pu128Src->au64[1];
9719 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9720 return;
9721 }
9722
9723 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9724 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9725}
9726#endif
9727
9728
9729/**
9730 * Fetches a data oword (octo word), generally AVX related.
9731 *
9732 * @returns Strict VBox status code.
9733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9734 * @param pu256Dst Where to return the oword.
9735 * @param iSegReg The index of the segment register to use for
9736 * this access. The base and limits are checked.
9737 * @param GCPtrMem The address of the guest memory.
9738 */
9739IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9740{
9741 /* The lazy approach for now... */
9742 PCRTUINT256U pu256Src;
9743 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9744 if (rc == VINF_SUCCESS)
9745 {
9746 pu256Dst->au64[0] = pu256Src->au64[0];
9747 pu256Dst->au64[1] = pu256Src->au64[1];
9748 pu256Dst->au64[2] = pu256Src->au64[2];
9749 pu256Dst->au64[3] = pu256Src->au64[3];
9750 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9751 }
9752 return rc;
9753}
9754
9755
9756#ifdef IEM_WITH_SETJMP
9757/**
9758 * Fetches a data oword (octo word), generally AVX related.
9759 *
9760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9761 * @param pu256Dst Where to return the oword.
9762 * @param iSegReg The index of the segment register to use for
9763 * this access. The base and limits are checked.
9764 * @param GCPtrMem The address of the guest memory.
9765 */
9766IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9767{
9768 /* The lazy approach for now... */
9769 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9770 pu256Dst->au64[0] = pu256Src->au64[0];
9771 pu256Dst->au64[1] = pu256Src->au64[1];
9772 pu256Dst->au64[2] = pu256Src->au64[2];
9773 pu256Dst->au64[3] = pu256Src->au64[3];
9774 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9775}
9776#endif
9777
9778
9779/**
9780 * Fetches a data oword (octo word) at an aligned address, generally AVX
9781 * related.
9782 *
9783 * Raises \#GP(0) if not aligned.
9784 *
9785 * @returns Strict VBox status code.
9786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9787 * @param pu256Dst Where to return the oword.
9788 * @param iSegReg The index of the segment register to use for
9789 * this access. The base and limits are checked.
9790 * @param GCPtrMem The address of the guest memory.
9791 */
9792IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9793{
9794 /* The lazy approach for now... */
9795 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9796 if (GCPtrMem & 31)
9797 return iemRaiseGeneralProtectionFault0(pVCpu);
9798
9799 PCRTUINT256U pu256Src;
9800 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9801 if (rc == VINF_SUCCESS)
9802 {
9803 pu256Dst->au64[0] = pu256Src->au64[0];
9804 pu256Dst->au64[1] = pu256Src->au64[1];
9805 pu256Dst->au64[2] = pu256Src->au64[2];
9806 pu256Dst->au64[3] = pu256Src->au64[3];
9807 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9808 }
9809 return rc;
9810}
9811
9812
9813#ifdef IEM_WITH_SETJMP
9814/**
9815 * Fetches a data oword (octo word) at an aligned address, generally AVX
9816 * related, longjmp on error.
9817 *
9818 * Raises \#GP(0) if not aligned.
9819 *
9820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9821 * @param pu256Dst Where to return the oword.
9822 * @param iSegReg The index of the segment register to use for
9823 * this access. The base and limits are checked.
9824 * @param GCPtrMem The address of the guest memory.
9825 */
9826DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9827{
9828 /* The lazy approach for now... */
9829 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9830 if ((GCPtrMem & 31) == 0)
9831 {
9832 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9833 pu256Dst->au64[0] = pu256Src->au64[0];
9834 pu256Dst->au64[1] = pu256Src->au64[1];
9835 pu256Dst->au64[2] = pu256Src->au64[2];
9836 pu256Dst->au64[3] = pu256Src->au64[3];
9837 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9838 return;
9839 }
9840
9841 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9842 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9843}
9844#endif
9845
9846
9847
9848/**
9849 * Fetches a descriptor register (lgdt, lidt).
9850 *
9851 * @returns Strict VBox status code.
9852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9853 * @param pcbLimit Where to return the limit.
9854 * @param pGCPtrBase Where to return the base.
9855 * @param iSegReg The index of the segment register to use for
9856 * this access. The base and limits are checked.
9857 * @param GCPtrMem The address of the guest memory.
9858 * @param enmOpSize The effective operand size.
9859 */
9860IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9861 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9862{
9863 /*
9864 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9865 * little special:
9866 * - The two reads are done separately.
9867 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9868 * - We suspect the 386 to actually commit the limit before the base in
9869 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9870 * don't try to emulate this eccentric behavior, because it's not well
9871 * enough understood and rather hard to trigger.
9872 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9873 */
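    /*
     * Summary of the operand layout handled below:
     *    +0  16-bit limit (the 486 path reads a dword and keeps the low word),
     *    +2  base: 64-bit in long mode, otherwise 32-bit, of which only the
     *        low 24 bits are kept when the operand size is 16-bit.
     */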
9874 VBOXSTRICTRC rcStrict;
9875 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9876 {
9877 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9878 if (rcStrict == VINF_SUCCESS)
9879 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9880 }
9881 else
9882 {
9883 uint32_t uTmp = 0; /* (silences a potential 'used uninitialized' warning from Visual C++) */
9884 if (enmOpSize == IEMMODE_32BIT)
9885 {
9886 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9887 {
9888 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9889 if (rcStrict == VINF_SUCCESS)
9890 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9891 }
9892 else
9893 {
9894 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9895 if (rcStrict == VINF_SUCCESS)
9896 {
9897 *pcbLimit = (uint16_t)uTmp;
9898 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9899 }
9900 }
9901 if (rcStrict == VINF_SUCCESS)
9902 *pGCPtrBase = uTmp;
9903 }
9904 else
9905 {
9906 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9907 if (rcStrict == VINF_SUCCESS)
9908 {
9909 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9910 if (rcStrict == VINF_SUCCESS)
9911 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9912 }
9913 }
9914 }
9915 return rcStrict;
9916}
9917
9918
9919
9920/**
9921 * Stores a data byte.
9922 *
9923 * @returns Strict VBox status code.
9924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9925 * @param iSegReg The index of the segment register to use for
9926 * this access. The base and limits are checked.
9927 * @param GCPtrMem The address of the guest memory.
9928 * @param u8Value The value to store.
9929 */
9930IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9931{
9932 /* The lazy approach for now... */
9933 uint8_t *pu8Dst;
9934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9935 if (rc == VINF_SUCCESS)
9936 {
9937 *pu8Dst = u8Value;
9938 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9939 }
9940 return rc;
9941}
9942
9943
9944#ifdef IEM_WITH_SETJMP
9945/**
9946 * Stores a data byte, longjmp on error.
9947 *
9948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9949 * @param iSegReg The index of the segment register to use for
9950 * this access. The base and limits are checked.
9951 * @param GCPtrMem The address of the guest memory.
9952 * @param u8Value The value to store.
9953 */
9954IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9955{
9956 /* The lazy approach for now... */
9957 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9958 *pu8Dst = u8Value;
9959 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9960}
9961#endif
9962
9963
9964/**
9965 * Stores a data word.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param iSegReg The index of the segment register to use for
9970 * this access. The base and limits are checked.
9971 * @param GCPtrMem The address of the guest memory.
9972 * @param u16Value The value to store.
9973 */
9974IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9975{
9976 /* The lazy approach for now... */
9977 uint16_t *pu16Dst;
9978 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9979 if (rc == VINF_SUCCESS)
9980 {
9981 *pu16Dst = u16Value;
9982 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9983 }
9984 return rc;
9985}
9986
9987
9988#ifdef IEM_WITH_SETJMP
9989/**
9990 * Stores a data word, longjmp on error.
9991 *
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param iSegReg The index of the segment register to use for
9994 * this access. The base and limits are checked.
9995 * @param GCPtrMem The address of the guest memory.
9996 * @param u16Value The value to store.
9997 */
9998IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9999{
10000 /* The lazy approach for now... */
10001 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10002 *pu16Dst = u16Value;
10003 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10004}
10005#endif
10006
10007
10008/**
10009 * Stores a data dword.
10010 *
10011 * @returns Strict VBox status code.
10012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10013 * @param iSegReg The index of the segment register to use for
10014 * this access. The base and limits are checked.
10015 * @param GCPtrMem The address of the guest memory.
10016 * @param u32Value The value to store.
10017 */
10018IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10019{
10020 /* The lazy approach for now... */
10021 uint32_t *pu32Dst;
10022 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10023 if (rc == VINF_SUCCESS)
10024 {
10025 *pu32Dst = u32Value;
10026 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10027 }
10028 return rc;
10029}
10030
10031
10032#ifdef IEM_WITH_SETJMP
10033/**
10034 * Stores a data dword, longjmp on error.
10035 *
10037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10038 * @param iSegReg The index of the segment register to use for
10039 * this access. The base and limits are checked.
10040 * @param GCPtrMem The address of the guest memory.
10041 * @param u32Value The value to store.
10042 */
10043IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10044{
10045 /* The lazy approach for now... */
10046 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10047 *pu32Dst = u32Value;
10048 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10049}
10050#endif
10051
10052
10053/**
10054 * Stores a data qword.
10055 *
10056 * @returns Strict VBox status code.
10057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10058 * @param iSegReg The index of the segment register to use for
10059 * this access. The base and limits are checked.
10060 * @param GCPtrMem The address of the guest memory.
10061 * @param u64Value The value to store.
10062 */
10063IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10064{
10065 /* The lazy approach for now... */
10066 uint64_t *pu64Dst;
10067 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10068 if (rc == VINF_SUCCESS)
10069 {
10070 *pu64Dst = u64Value;
10071 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10072 }
10073 return rc;
10074}
10075
10076
10077#ifdef IEM_WITH_SETJMP
10078/**
10079 * Stores a data qword, longjmp on error.
10080 *
10081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10082 * @param iSegReg The index of the segment register to use for
10083 * this access. The base and limits are checked.
10084 * @param GCPtrMem The address of the guest memory.
10085 * @param u64Value The value to store.
10086 */
10087IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10088{
10089 /* The lazy approach for now... */
10090 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10091 *pu64Dst = u64Value;
10092 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10093}
10094#endif
10095
10096
10097/**
10098 * Stores a data dqword.
10099 *
10100 * @returns Strict VBox status code.
10101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10102 * @param iSegReg The index of the segment register to use for
10103 * this access. The base and limits are checked.
10104 * @param GCPtrMem The address of the guest memory.
10105 * @param u128Value The value to store.
10106 */
10107IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10108{
10109 /* The lazy approach for now... */
10110 PRTUINT128U pu128Dst;
10111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10112 if (rc == VINF_SUCCESS)
10113 {
10114 pu128Dst->au64[0] = u128Value.au64[0];
10115 pu128Dst->au64[1] = u128Value.au64[1];
10116 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10117 }
10118 return rc;
10119}
10120
10121
10122#ifdef IEM_WITH_SETJMP
10123/**
10124 * Stores a data dqword, longjmp on error.
10125 *
10126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10127 * @param iSegReg The index of the segment register to use for
10128 * this access. The base and limits are checked.
10129 * @param GCPtrMem The address of the guest memory.
10130 * @param u128Value The value to store.
10131 */
10132IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10133{
10134 /* The lazy approach for now... */
10135 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10136 pu128Dst->au64[0] = u128Value.au64[0];
10137 pu128Dst->au64[1] = u128Value.au64[1];
10138 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10139}
10140#endif
10141
10142
10143/**
10144 * Stores a data dqword, SSE aligned.
10145 *
10146 * @returns Strict VBox status code.
10147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10148 * @param iSegReg The index of the segment register to use for
10149 * this access. The base and limits are checked.
10150 * @param GCPtrMem The address of the guest memory.
10151 * @param u128Value The value to store.
10152 */
10153IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10154{
10155 /* The lazy approach for now... */
10156 if ( (GCPtrMem & 15)
10157 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10158 return iemRaiseGeneralProtectionFault0(pVCpu);
10159
10160 PRTUINT128U pu128Dst;
10161 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10162 if (rc == VINF_SUCCESS)
10163 {
10164 pu128Dst->au64[0] = u128Value.au64[0];
10165 pu128Dst->au64[1] = u128Value.au64[1];
10166 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10167 }
10168 return rc;
10169}
10170
10171
10172#ifdef IEM_WITH_SETJMP
10173/**
10174 * Stores a data dqword, SSE aligned, longjmp on error.
10175 *
10177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10178 * @param iSegReg The index of the segment register to use for
10179 * this access. The base and limits are checked.
10180 * @param GCPtrMem The address of the guest memory.
10181 * @param u128Value The value to store.
10182 */
10183DECL_NO_INLINE(IEM_STATIC, void)
10184iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10185{
10186 /* The lazy approach for now... */
10187 if ( (GCPtrMem & 15) == 0
10188 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10189 {
10190 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10191 pu128Dst->au64[0] = u128Value.au64[0];
10192 pu128Dst->au64[1] = u128Value.au64[1];
10193 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10194 return;
10195 }
10196
10197 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10198 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10199}
10200#endif
10201
10202
10203/**
10204 * Stores a data oword (octo word).
10205 *
10206 * @returns Strict VBox status code.
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param iSegReg The index of the segment register to use for
10209 * this access. The base and limits are checked.
10210 * @param GCPtrMem The address of the guest memory.
10211 * @param pu256Value Pointer to the value to store.
10212 */
10213IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10214{
10215 /* The lazy approach for now... */
10216 PRTUINT256U pu256Dst;
10217 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10218 if (rc == VINF_SUCCESS)
10219 {
10220 pu256Dst->au64[0] = pu256Value->au64[0];
10221 pu256Dst->au64[1] = pu256Value->au64[1];
10222 pu256Dst->au64[2] = pu256Value->au64[2];
10223 pu256Dst->au64[3] = pu256Value->au64[3];
10224 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10225 }
10226 return rc;
10227}
10228
10229
10230#ifdef IEM_WITH_SETJMP
10231/**
10232 * Stores a data oword (octo word), longjmp on error.
10233 *
10234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10235 * @param iSegReg The index of the segment register to use for
10236 * this access. The base and limits are checked.
10237 * @param GCPtrMem The address of the guest memory.
10238 * @param pu256Value Pointer to the value to store.
10239 */
10240IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10241{
10242 /* The lazy approach for now... */
10243 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10244 pu256Dst->au64[0] = pu256Value->au64[0];
10245 pu256Dst->au64[1] = pu256Value->au64[1];
10246 pu256Dst->au64[2] = pu256Value->au64[2];
10247 pu256Dst->au64[3] = pu256Value->au64[3];
10248 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10249}
10250#endif
10251
10252
10253/**
10254 * Stores a data oword (octo word), AVX aligned.
10255 *
10256 * @returns Strict VBox status code.
10257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10258 * @param iSegReg The index of the segment register to use for
10259 * this access. The base and limits are checked.
10260 * @param GCPtrMem The address of the guest memory.
10261 * @param pu256Value Pointer to the value to store.
10262 */
10263IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10264{
10265 /* The lazy approach for now... */
10266 if (GCPtrMem & 31)
10267 return iemRaiseGeneralProtectionFault0(pVCpu);
10268
10269 PRTUINT256U pu256Dst;
10270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10271 if (rc == VINF_SUCCESS)
10272 {
10273 pu256Dst->au64[0] = pu256Value->au64[0];
10274 pu256Dst->au64[1] = pu256Value->au64[1];
10275 pu256Dst->au64[2] = pu256Value->au64[2];
10276 pu256Dst->au64[3] = pu256Value->au64[3];
10277 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10278 }
10279 return rc;
10280}
10281
10282
10283#ifdef IEM_WITH_SETJMP
10284/**
10285 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10286 *
10288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10289 * @param iSegReg The index of the segment register to use for
10290 * this access. The base and limits are checked.
10291 * @param GCPtrMem The address of the guest memory.
10292 * @param pu256Value Pointer to the value to store.
10293 */
10294DECL_NO_INLINE(IEM_STATIC, void)
10295iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10296{
10297 /* The lazy approach for now... */
10298 if ((GCPtrMem & 31) == 0)
10299 {
10300 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10301 pu256Dst->au64[0] = pu256Value->au64[0];
10302 pu256Dst->au64[1] = pu256Value->au64[1];
10303 pu256Dst->au64[2] = pu256Value->au64[2];
10304 pu256Dst->au64[3] = pu256Value->au64[3];
10305 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10306 return;
10307 }
10308
10309 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10310 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10311}
10312#endif
10313
10314
10315/**
10316 * Stores a descriptor register (sgdt, sidt).
10317 *
10318 * @returns Strict VBox status code.
10319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10320 * @param cbLimit The limit.
10321 * @param GCPtrBase The base address.
10322 * @param iSegReg The index of the segment register to use for
10323 * this access. The base and limits are checked.
10324 * @param GCPtrMem The address of the guest memory.
10325 */
10326IEM_STATIC VBOXSTRICTRC
10327iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10328{
10329 /*
10330 * The SIDT and SGDT instructions actually store the data using two
10331 * independent writes. The instructions do not respond to opsize prefixes.
10332 */
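    /*
     * Layout written below: 16-bit limit at +0, then the base at +2 (dword in
     * 16-bit and 32-bit mode, qword in 64-bit mode); on 286-class CPUs the top
     * byte of the 16-bit variant is forced to 0xff.
     */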
10333 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10334 if (rcStrict == VINF_SUCCESS)
10335 {
10336 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10337 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10338 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10339 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10340 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10341 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10342 else
10343 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10344 }
10345 return rcStrict;
10346}
10347
10348
10349/**
10350 * Pushes a word onto the stack.
10351 *
10352 * @returns Strict VBox status code.
10353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10354 * @param u16Value The value to push.
10355 */
10356IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10357{
10358 /* Decrement the stack pointer. */
10359 uint64_t uNewRsp;
10360 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10361
10362 /* Write the word the lazy way. */
10363 uint16_t *pu16Dst;
10364 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10365 if (rc == VINF_SUCCESS)
10366 {
10367 *pu16Dst = u16Value;
10368 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10369 }
10370
10371 /* Commit the new RSP value unless an access handler made trouble. */
10372 if (rc == VINF_SUCCESS)
10373 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10374
10375 return rc;
10376}
10377
10378
10379/**
10380 * Pushes a dword onto the stack.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param u32Value The value to push.
10385 */
10386IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10387{
10388 /* Decrement the stack pointer. */
10389 uint64_t uNewRsp;
10390 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10391
10392 /* Write the dword the lazy way. */
10393 uint32_t *pu32Dst;
10394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10395 if (rc == VINF_SUCCESS)
10396 {
10397 *pu32Dst = u32Value;
10398 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10399 }
10400
10401 /* Commit the new RSP value unless an access handler made trouble. */
10402 if (rc == VINF_SUCCESS)
10403 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10404
10405 return rc;
10406}
10407
10408
10409/**
10410 * Pushes a dword segment register value onto the stack.
10411 *
10412 * @returns Strict VBox status code.
10413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10414 * @param u32Value The value to push.
10415 */
10416IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10417{
10418 /* Decrement the stack pointer. */
10419 uint64_t uNewRsp;
10420 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10421
10422 /* The Intel docs talk about zero extending the selector register
10423 value. My actual Intel CPU here might be zero extending the value,
10424 but it still only writes the lower word... */
10425 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10426 * happens when crossing a page boundary: is the high word checked
10427 * for write accessibility or not? Probably it is. What about segment limits?
10428 * It appears this behavior is also shared with trap error codes.
10429 *
10430 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10431 * ancient hardware when it actually did change. */
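    /* Current approach: map a full dword of stack read-write so the untouched
       high word keeps its old contents, then store only the low word below. */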
10432 uint16_t *pu16Dst;
10433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10434 if (rc == VINF_SUCCESS)
10435 {
10436 *pu16Dst = (uint16_t)u32Value;
10437 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10438 }
10439
10440 /* Commit the new RSP value unless an access handler made trouble. */
10441 if (rc == VINF_SUCCESS)
10442 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10443
10444 return rc;
10445}
10446
10447
10448/**
10449 * Pushes a qword onto the stack.
10450 *
10451 * @returns Strict VBox status code.
10452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10453 * @param u64Value The value to push.
10454 */
10455IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10456{
10457 /* Decrement the stack pointer. */
10458 uint64_t uNewRsp;
10459 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10460
10461 /* Write the qword the lazy way. */
10462 uint64_t *pu64Dst;
10463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10464 if (rc == VINF_SUCCESS)
10465 {
10466 *pu64Dst = u64Value;
10467 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10468 }
10469
10470 /* Commit the new RSP value unless an access handler made trouble. */
10471 if (rc == VINF_SUCCESS)
10472 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10473
10474 return rc;
10475}
10476
10477
10478/**
10479 * Pops a word from the stack.
10480 *
10481 * @returns Strict VBox status code.
10482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10483 * @param pu16Value Where to store the popped value.
10484 */
10485IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10486{
10487 /* Increment the stack pointer. */
10488 uint64_t uNewRsp;
10489 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10490
10491 /* Read the word the lazy way. */
10492 uint16_t const *pu16Src;
10493 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10494 if (rc == VINF_SUCCESS)
10495 {
10496 *pu16Value = *pu16Src;
10497 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10498
10499 /* Commit the new RSP value. */
10500 if (rc == VINF_SUCCESS)
10501 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10502 }
10503
10504 return rc;
10505}
10506
10507
10508/**
10509 * Pops a dword from the stack.
10510 *
10511 * @returns Strict VBox status code.
10512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10513 * @param pu32Value Where to store the popped value.
10514 */
10515IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10516{
10517 /* Increment the stack pointer. */
10518 uint64_t uNewRsp;
10519 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10520
10521 /* Read the dword the lazy way. */
10522 uint32_t const *pu32Src;
10523 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10524 if (rc == VINF_SUCCESS)
10525 {
10526 *pu32Value = *pu32Src;
10527 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10528
10529 /* Commit the new RSP value. */
10530 if (rc == VINF_SUCCESS)
10531 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10532 }
10533
10534 return rc;
10535}
10536
10537
10538/**
10539 * Pops a qword from the stack.
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10543 * @param pu64Value Where to store the popped value.
10544 */
10545IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10546{
10547 /* Increment the stack pointer. */
10548 uint64_t uNewRsp;
10549 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10550
10551 /* Read the qword the lazy way. */
10552 uint64_t const *pu64Src;
10553 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10554 if (rc == VINF_SUCCESS)
10555 {
10556 *pu64Value = *pu64Src;
10557 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10558
10559 /* Commit the new RSP value. */
10560 if (rc == VINF_SUCCESS)
10561 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10562 }
10563
10564 return rc;
10565}
10566
10567
10568/**
10569 * Pushes a word onto the stack, using a temporary stack pointer.
10570 *
10571 * @returns Strict VBox status code.
10572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10573 * @param u16Value The value to push.
10574 * @param pTmpRsp Pointer to the temporary stack pointer.
10575 */
10576IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10577{
10578 /* Decrement the stack pointer. */
10579 RTUINT64U NewRsp = *pTmpRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10581
10582 /* Write the word the lazy way. */
10583 uint16_t *pu16Dst;
10584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10585 if (rc == VINF_SUCCESS)
10586 {
10587 *pu16Dst = u16Value;
10588 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10589 }
10590
10591 /* Commit the new RSP value unless an access handler made trouble. */
10592 if (rc == VINF_SUCCESS)
10593 *pTmpRsp = NewRsp;
10594
10595 return rc;
10596}
10597
10598
10599/**
10600 * Pushes a dword onto the stack, using a temporary stack pointer.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param u32Value The value to push.
10605 * @param pTmpRsp Pointer to the temporary stack pointer.
10606 */
10607IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10608{
10609 /* Decrement the stack pointer. */
10610 RTUINT64U NewRsp = *pTmpRsp;
10611 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10612
10613 /* Write the dword the lazy way. */
10614 uint32_t *pu32Dst;
10615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10616 if (rc == VINF_SUCCESS)
10617 {
10618 *pu32Dst = u32Value;
10619 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10620 }
10621
10622 /* Commit the new RSP value unless an access handler made trouble. */
10623 if (rc == VINF_SUCCESS)
10624 *pTmpRsp = NewRsp;
10625
10626 return rc;
10627}
10628
10629
10630/**
10631 * Pushes a qword onto the stack, using a temporary stack pointer.
10632 *
10633 * @returns Strict VBox status code.
10634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10635 * @param u64Value The value to push.
10636 * @param pTmpRsp Pointer to the temporary stack pointer.
10637 */
10638IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10639{
10640 /* Decrement the stack pointer. */
10641 RTUINT64U NewRsp = *pTmpRsp;
10642 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10643
10644 /* Write the qword the lazy way. */
10645 uint64_t *pu64Dst;
10646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10647 if (rc == VINF_SUCCESS)
10648 {
10649 *pu64Dst = u64Value;
10650 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10651 }
10652
10653 /* Commit the new RSP value unless an access handler made trouble. */
10654 if (rc == VINF_SUCCESS)
10655 *pTmpRsp = NewRsp;
10656
10657 return rc;
10658}
10659
10660
10661/**
10662 * Pops a word from the stack, using a temporary stack pointer.
10663 *
10664 * @returns Strict VBox status code.
10665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10666 * @param pu16Value Where to store the popped value.
10667 * @param pTmpRsp Pointer to the temporary stack pointer.
10668 */
10669IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10670{
10671 /* Increment the stack pointer. */
10672 RTUINT64U NewRsp = *pTmpRsp;
10673 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10674
10675 /* Read the word the lazy way. */
10676 uint16_t const *pu16Src;
10677 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10678 if (rc == VINF_SUCCESS)
10679 {
10680 *pu16Value = *pu16Src;
10681 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10682
10683 /* Commit the new RSP value. */
10684 if (rc == VINF_SUCCESS)
10685 *pTmpRsp = NewRsp;
10686 }
10687
10688 return rc;
10689}
10690
10691
10692/**
10693 * Pops a dword from the stack, using a temporary stack pointer.
10694 *
10695 * @returns Strict VBox status code.
10696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10697 * @param pu32Value Where to store the popped value.
10698 * @param pTmpRsp Pointer to the temporary stack pointer.
10699 */
10700IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10701{
10702 /* Increment the stack pointer. */
10703 RTUINT64U NewRsp = *pTmpRsp;
10704 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10705
10706 /* Read the dword the lazy way. */
10707 uint32_t const *pu32Src;
10708 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10709 if (rc == VINF_SUCCESS)
10710 {
10711 *pu32Value = *pu32Src;
10712 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10713
10714 /* Commit the new RSP value. */
10715 if (rc == VINF_SUCCESS)
10716 *pTmpRsp = NewRsp;
10717 }
10718
10719 return rc;
10720}
10721
10722
10723/**
10724 * Pops a qword from the stack, using a temporary stack pointer.
10725 *
10726 * @returns Strict VBox status code.
10727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10728 * @param pu64Value Where to store the popped value.
10729 * @param pTmpRsp Pointer to the temporary stack pointer.
10730 */
10731IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10732{
10733 /* Increment the stack pointer. */
10734 RTUINT64U NewRsp = *pTmpRsp;
10735 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10736
10737 /* Read the qword the lazy way. */
10738 uint64_t const *pu64Src;
10739 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10740 if (rcStrict == VINF_SUCCESS)
10741 {
10742 *pu64Value = *pu64Src;
10743 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10744
10745 /* Commit the new RSP value. */
10746 if (rcStrict == VINF_SUCCESS)
10747 *pTmpRsp = NewRsp;
10748 }
10749
10750 return rcStrict;
10751}
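
/*
 * Usage sketch (hypothetical caller, not part of this file): popping a qword
 * through a temporary stack pointer and only committing it to the guest
 * context once everything has succeeded.  The uValue local is illustrative.
 *
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *      uint64_t     uValue;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &uValue, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;   // commit the advanced stack pointer
 *      return rcStrict;
 */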
10752
10753
10754/**
10755 * Begin a special stack push (used by interrupts, exceptions and such).
10756 *
10757 * This will raise \#SS or \#PF if appropriate.
10758 *
10759 * @returns Strict VBox status code.
10760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10761 * @param cbMem The number of bytes to push onto the stack.
10762 * @param ppvMem Where to return the pointer to the stack memory.
10763 * As with the other memory functions this could be
10764 * direct access or bounce buffered access, so
10765 * don't commit any register changes until the commit call
10766 * succeeds.
10767 * @param puNewRsp Where to return the new RSP value. This must be
10768 * passed unchanged to
10769 * iemMemStackPushCommitSpecial().
10770 */
10771IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10772{
10773 Assert(cbMem < UINT8_MAX);
10774 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10775 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10776}
10777
10778
10779/**
10780 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10781 *
10782 * This will update the rSP.
10783 *
10784 * @returns Strict VBox status code.
10785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10786 * @param pvMem The pointer returned by
10787 * iemMemStackPushBeginSpecial().
10788 * @param uNewRsp The new RSP value returned by
10789 * iemMemStackPushBeginSpecial().
10790 */
10791IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10792{
10793 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10794 if (rcStrict == VINF_SUCCESS)
10795 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10796 return rcStrict;
10797}
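
/*
 * Usage sketch (hypothetical, for illustration only): pushing a 64-bit value,
 * say an error code supplied by the caller as uErrCode, via the begin/commit
 * pair above during event delivery.
 *
 *      uint64_t    *pu64Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pu64Frame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu64Frame = uErrCode;   // uErrCode: assumed caller-provided value
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp); // updates RSP on success
 *      }
 *      return rcStrict;
 */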
10798
10799
10800/**
10801 * Begin a special stack pop (used by iret, retf and such).
10802 *
10803 * This will raise \#SS or \#PF if appropriate.
10804 *
10805 * @returns Strict VBox status code.
10806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10807 * @param cbMem The number of bytes to pop from the stack.
10808 * @param ppvMem Where to return the pointer to the stack memory.
10809 * @param puNewRsp Where to return the new RSP value. This must be
10810 * assigned to CPUMCTX::rsp manually some time
10811 * after iemMemStackPopDoneSpecial() has been
10812 * called.
10813 */
10814IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10815{
10816 Assert(cbMem < UINT8_MAX);
10817 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10818 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10819}
10820
10821
10822/**
10823 * Continue a special stack pop (used by iret and retf).
10824 *
10825 * This will raise \#SS or \#PF if appropriate.
10826 *
10827 * @returns Strict VBox status code.
10828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10829 * @param cbMem The number of bytes to pop from the stack.
10830 * @param ppvMem Where to return the pointer to the stack memory.
10831 * @param puNewRsp Where to return the new RSP value. This must be
10832 * assigned to CPUMCTX::rsp manually some time
10833 * after iemMemStackPopDoneSpecial() has been
10834 * called.
10835 */
10836IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10837{
10838 Assert(cbMem < UINT8_MAX);
10839 RTUINT64U NewRsp;
10840 NewRsp.u = *puNewRsp;
10841 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10842 *puNewRsp = NewRsp.u;
10843 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10844}
10845
10846
10847/**
10848 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10849 * iemMemStackPopContinueSpecial).
10850 *
10851 * The caller will manually commit the rSP.
10852 *
10853 * @returns Strict VBox status code.
10854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10855 * @param pvMem The pointer returned by
10856 * iemMemStackPopBeginSpecial() or
10857 * iemMemStackPopContinueSpecial().
10858 */
10859IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10860{
10861 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10862}
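
/*
 * Usage sketch (hypothetical, for illustration only): reading a two-qword
 * frame with the begin/done pair above.  Note that RSP is only written back
 * by the caller, and only after iemMemStackPopDoneSpecial() has succeeded.
 *
 *      uint64_t const *pau64Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 16, (void const **)&pau64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uFrame0 = pau64Frame[0];  // consume the frame before unmapping it
 *      uint64_t const uFrame1 = pau64Frame[1];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau64Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;    // manual RSP commit, as documented above
 *      return rcStrict;
 */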
10863
10864
10865/**
10866 * Fetches a system table byte.
10867 *
10868 * @returns Strict VBox status code.
10869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10870 * @param pbDst Where to return the byte.
10871 * @param iSegReg The index of the segment register to use for
10872 * this access. The base and limits are checked.
10873 * @param GCPtrMem The address of the guest memory.
10874 */
10875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10876{
10877 /* The lazy approach for now... */
10878 uint8_t const *pbSrc;
10879 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10880 if (rc == VINF_SUCCESS)
10881 {
10882 *pbDst = *pbSrc;
10883 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10884 }
10885 return rc;
10886}
10887
10888
10889/**
10890 * Fetches a system table word.
10891 *
10892 * @returns Strict VBox status code.
10893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10894 * @param pu16Dst Where to return the word.
10895 * @param iSegReg The index of the segment register to use for
10896 * this access. The base and limits are checked.
10897 * @param GCPtrMem The address of the guest memory.
10898 */
10899IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10900{
10901 /* The lazy approach for now... */
10902 uint16_t const *pu16Src;
10903 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10904 if (rc == VINF_SUCCESS)
10905 {
10906 *pu16Dst = *pu16Src;
10907 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10908 }
10909 return rc;
10910}
10911
10912
10913/**
10914 * Fetches a system table dword.
10915 *
10916 * @returns Strict VBox status code.
10917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10918 * @param pu32Dst Where to return the dword.
10919 * @param iSegReg The index of the segment register to use for
10920 * this access. The base and limits are checked.
10921 * @param GCPtrMem The address of the guest memory.
10922 */
10923IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10924{
10925 /* The lazy approach for now... */
10926 uint32_t const *pu32Src;
10927 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10928 if (rc == VINF_SUCCESS)
10929 {
10930 *pu32Dst = *pu32Src;
10931 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10932 }
10933 return rc;
10934}
10935
10936
10937/**
10938 * Fetches a system table qword.
10939 *
10940 * @returns Strict VBox status code.
10941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10942 * @param pu64Dst Where to return the qword.
10943 * @param iSegReg The index of the segment register to use for
10944 * this access. The base and limits are checked.
10945 * @param GCPtrMem The address of the guest memory.
10946 */
10947IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10948{
10949 /* The lazy approach for now... */
10950 uint64_t const *pu64Src;
10951 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10952 if (rc == VINF_SUCCESS)
10953 {
10954 *pu64Dst = *pu64Src;
10955 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10956 }
10957 return rc;
10958}
10959
10960
10961/**
10962 * Fetches a descriptor table entry with caller specified error code.
10963 *
10964 * @returns Strict VBox status code.
10965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10966 * @param pDesc Where to return the descriptor table entry.
10967 * @param uSel The selector which table entry to fetch.
10968 * @param uXcpt The exception to raise on table lookup error.
10969 * @param uErrorCode The error code associated with the exception.
10970 */
10971IEM_STATIC VBOXSTRICTRC
10972iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10973{
10974 AssertPtr(pDesc);
10975 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10976
10977 /** @todo did the 286 require all 8 bytes to be accessible? */
10978 /*
10979 * Get the selector table base and check bounds.
10980 */
10981 RTGCPTR GCPtrBase;
10982 if (uSel & X86_SEL_LDT)
10983 {
10984 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10985 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10986 {
10987 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10988 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10989 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10990 uErrorCode, 0);
10991 }
10992
10993 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10994 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10995 }
10996 else
10997 {
10998 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10999 {
11000 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11001 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11002 uErrorCode, 0);
11003 }
11004 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11005 }
11006
11007 /*
11008 * Read the legacy descriptor and maybe the long mode extensions if
11009 * required.
11010 */
11011 VBOXSTRICTRC rcStrict;
11012 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11013 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11014 else
11015 {
11016 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11017 if (rcStrict == VINF_SUCCESS)
11018 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11019 if (rcStrict == VINF_SUCCESS)
11020 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11021 if (rcStrict == VINF_SUCCESS)
11022 pDesc->Legacy.au16[3] = 0;
11023 else
11024 return rcStrict;
11025 }
11026
11027 if (rcStrict == VINF_SUCCESS)
11028 {
11029 if ( !IEM_IS_LONG_MODE(pVCpu)
11030 || pDesc->Legacy.Gen.u1DescType)
11031 pDesc->Long.au64[1] = 0;
11032 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11033 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11034 else
11035 {
11036 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11037 /** @todo is this the right exception? */
11038 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11039 }
11040 }
11041 return rcStrict;
11042}
11043
11044
11045/**
11046 * Fetches a descriptor table entry.
11047 *
11048 * @returns Strict VBox status code.
11049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11050 * @param pDesc Where to return the descriptor table entry.
11051 * @param uSel The selector which table entry to fetch.
11052 * @param uXcpt The exception to raise on table lookup error.
11053 */
11054IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11055{
11056 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11057}
11058
11059
11060/**
11061 * Fakes a long mode stack selector for SS = 0.
11062 *
11063 * @param pDescSs Where to return the fake stack descriptor.
11064 * @param uDpl The DPL we want.
11065 */
11066IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11067{
11068 pDescSs->Long.au64[0] = 0;
11069 pDescSs->Long.au64[1] = 0;
11070 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11071 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11072 pDescSs->Long.Gen.u2Dpl = uDpl;
11073 pDescSs->Long.Gen.u1Present = 1;
11074 pDescSs->Long.Gen.u1Long = 1;
11075}
11076
11077
11078/**
11079 * Marks the selector descriptor as accessed (only non-system descriptors).
11080 *
11081 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11082 * will therefore skip the limit checks.
11083 *
11084 * @returns Strict VBox status code.
11085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11086 * @param uSel The selector.
11087 */
11088IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11089{
11090 /*
11091 * Get the selector table base and calculate the entry address.
11092 */
11093 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11094 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11095 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11096 GCPtr += uSel & X86_SEL_MASK;
11097
11098 /*
11099 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11100 * ugly stuff to avoid this.  This will make sure the access is atomic and
11101 * more or less removes any question about 8-bit vs 32-bit accesses.
11102 */
11103 VBOXSTRICTRC rcStrict;
11104 uint32_t volatile *pu32;
11105 if ((GCPtr & 3) == 0)
11106 {
11107        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11108 GCPtr += 2 + 2;
11109 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11110 if (rcStrict != VINF_SUCCESS)
11111 return rcStrict;
11112        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11113 }
11114 else
11115 {
11116 /* The misaligned GDT/LDT case, map the whole thing. */
11117 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11118 if (rcStrict != VINF_SUCCESS)
11119 return rcStrict;
11120 switch ((uintptr_t)pu32 & 3)
11121 {
11122 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11123 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11124 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11125 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11126 }
11127 }
11128
11129 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11130}
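
/*
 * Usage sketch (hypothetical, for illustration only): fetching the descriptor
 * for a selector uSel and marking it accessed, roughly the pattern used by
 * segment register loads.  Error handling is reduced to the bare minimum.
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;  // keep the local copy in sync
 *      }
 */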
11131
11132/** @} */
11133
11134
11135/*
11136 * Include the C/C++ implementation of the instructions.
11137 */
11138#include "IEMAllCImpl.cpp.h"
11139
11140
11141
11142/** @name "Microcode" macros.
11143 *
11144 * The idea is that we should be able to use the same code both to interpret
11145 * instructions and to recompile them.  Thus this obfuscation.
11146 *
11147 * @{
11148 */
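
/*
 * A minimal sketch (not taken from the opcode tables) of how an instruction
 * body strings these macros together - here a hypothetical mov r16,r16 style
 * operation using only macros defined in this group:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */
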
11149#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11150#define IEM_MC_END() }
11151#define IEM_MC_PAUSE() do {} while (0)
11152#define IEM_MC_CONTINUE() do {} while (0)
11153
11154/** Internal macro. */
11155#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11156 do \
11157 { \
11158 VBOXSTRICTRC rcStrict2 = a_Expr; \
11159 if (rcStrict2 != VINF_SUCCESS) \
11160 return rcStrict2; \
11161 } while (0)
11162
11163
11164#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11165#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11166#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11167#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11168#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11169#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11170#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11171#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11172#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11173 do { \
11174 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11175 return iemRaiseDeviceNotAvailable(pVCpu); \
11176 } while (0)
11177#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11178 do { \
11179 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11180 return iemRaiseDeviceNotAvailable(pVCpu); \
11181 } while (0)
11182#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11183 do { \
11184 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11185 return iemRaiseMathFault(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11188 do { \
11189 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11190 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11191 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11192 return iemRaiseUndefinedOpcode(pVCpu); \
11193 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11194 return iemRaiseDeviceNotAvailable(pVCpu); \
11195 } while (0)
11196#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11197 do { \
11198 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11199 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11200 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11201 return iemRaiseUndefinedOpcode(pVCpu); \
11202 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11203 return iemRaiseDeviceNotAvailable(pVCpu); \
11204 } while (0)
11205#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11206 do { \
11207 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11208 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11209 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11210 return iemRaiseUndefinedOpcode(pVCpu); \
11211 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11212 return iemRaiseDeviceNotAvailable(pVCpu); \
11213 } while (0)
11214#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11215 do { \
11216 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11217 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11218 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11219 return iemRaiseUndefinedOpcode(pVCpu); \
11220 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11221 return iemRaiseDeviceNotAvailable(pVCpu); \
11222 } while (0)
11223#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11224 do { \
11225 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11226 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11227 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11228 return iemRaiseUndefinedOpcode(pVCpu); \
11229 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11230 return iemRaiseDeviceNotAvailable(pVCpu); \
11231 } while (0)
11232#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11233 do { \
11234 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11235 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11236 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11237 return iemRaiseUndefinedOpcode(pVCpu); \
11238 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11239 return iemRaiseDeviceNotAvailable(pVCpu); \
11240 } while (0)
11241#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11242 do { \
11243 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11244 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11245 return iemRaiseUndefinedOpcode(pVCpu); \
11246 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11247 return iemRaiseDeviceNotAvailable(pVCpu); \
11248 } while (0)
11249#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11250 do { \
11251 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11252 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11253 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11254 return iemRaiseUndefinedOpcode(pVCpu); \
11255 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11256 return iemRaiseDeviceNotAvailable(pVCpu); \
11257 } while (0)
11258#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11259 do { \
11260 if (pVCpu->iem.s.uCpl != 0) \
11261 return iemRaiseGeneralProtectionFault0(pVCpu); \
11262 } while (0)
11263#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11264 do { \
11265 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11266 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11267 } while (0)
11268#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11269 do { \
11270 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11271 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11272 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11273 return iemRaiseUndefinedOpcode(pVCpu); \
11274 } while (0)
11275#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11276 do { \
11277 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11278 return iemRaiseGeneralProtectionFault0(pVCpu); \
11279 } while (0)
11280
11281
11282#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11283#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11284#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11285#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11286#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11287#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11288#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11289 uint32_t a_Name; \
11290 uint32_t *a_pName = &a_Name
11291#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11292 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11293
11294#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11295#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11296
11297#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11298#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11314#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11315 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11316 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11317 } while (0)
11318#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11319 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11320 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11321 } while (0)
11322#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11323 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11324 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11325 } while (0)
11326/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11327#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11328 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11329 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11330 } while (0)
11331#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11332 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11333 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11334 } while (0)
11335/** @note Not for IOPL or IF testing or modification. */
11336#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11337#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11338#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11339#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11340
11341#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11342#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11343#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11344#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11345#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11346#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11347#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11348#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11349#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11350#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11351/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11352#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11353 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11354 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11355 } while (0)
11356#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11357 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11358 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11359 } while (0)
11360#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11361 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11362
11363
11364#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11365#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11366/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11367 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11368#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11369#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11370/** @note Not for IOPL or IF testing or modification. */
11371#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11372
11373#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11374#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11375#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11376 do { \
11377 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11378 *pu32Reg += (a_u32Value); \
11379 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11380 } while (0)
11381#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11382
11383#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11384#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11385#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11386 do { \
11387 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11388 *pu32Reg -= (a_u32Value); \
11389 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11390 } while (0)
11391#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11392#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11393
11394#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11395#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11396#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11397#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11398#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11399#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11400#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11401
11402#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11403#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11404#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11405#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11406
11407#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11408#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11409#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11410
11411#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11412#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11413#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11414
11415#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11416#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11417#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11418
11419#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11420#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11421#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11422
11423#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11424
11425#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11426
11427#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11428#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11429#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11430 do { \
11431 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11432 *pu32Reg &= (a_u32Value); \
11433 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11434 } while (0)
11435#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11436
11437#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11438#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11439#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11440 do { \
11441 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11442 *pu32Reg |= (a_u32Value); \
11443 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11444 } while (0)
11445#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11446
11447
11448/** @note Not for IOPL or IF modification. */
11449#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11450/** @note Not for IOPL or IF modification. */
11451#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11452/** @note Not for IOPL or IF modification. */
11453#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11454
11455#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11456
11457/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xff). */
11458#define IEM_MC_FPU_TO_MMX_MODE() do { \
11459 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11460 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11461 } while (0)
11462
11463/** Switches the FPU state back out of MMX mode (FTW=0, all tags empty). */
11464#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11465 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11466 } while (0)
11467
11468#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11469 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11470#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11471 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11472#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11473 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11474 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11475 } while (0)
11476#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11477 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11478 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11479 } while (0)
11480#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11481 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11482#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11483 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11484#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11485 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11486
11487#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11488 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11489 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11490 } while (0)
11491#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11492 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11493#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11494 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11495#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11496 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11497#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11498 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11499 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11500 } while (0)
11501#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11502 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11503#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11504 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11505 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11506 } while (0)
11507#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11508 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11509#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11510 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11511 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11512 } while (0)
11513#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11514 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11515#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11516 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11517#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11518 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11519#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11520 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11521#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11522 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11523 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11524 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11525 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11526 } while (0)
11527
11528#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11529 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11530 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11531 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11532 } while (0)
11533#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11534 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11535 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11536 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11537 } while (0)
11538#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11539 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11540 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11541 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11542 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11543 } while (0)
11544#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11545 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11546 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11547 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11548 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11549 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11550 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11551 } while (0)
11552
11553#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11554#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11555 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11556 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11559 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11561 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11562 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11563 } while (0)
11564#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11565 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11566 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11572 } while (0)
11573#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11574 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11575 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11580 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11581 } while (0)
11582#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11583 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11584 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591
11592#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11593 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11594#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11595 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11596#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11597 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11598#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11599 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11600 uintptr_t const iYRegTmp = (a_iYReg); \
11601 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11602 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11603 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11604 } while (0)
11605
11606#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11607 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11608 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11609 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11610 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11611 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11612 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11613 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11614 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11615 } while (0)
11616#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11617 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11618 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11619 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11621 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11623 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11624 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11625 } while (0)
11626#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11627 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11628 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11629 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11631 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11633 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11634 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11635 } while (0)
11636
11637#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11638 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11639 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11640 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11641 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11642 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11643 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11644 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11645 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11646 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11647 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11648 } while (0)
11649#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11650 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11651 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11652 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11653 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11654 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11655 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11656 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11657 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11658 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11659 } while (0)
11660#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11661 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11662 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11663 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11664 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11665 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11666 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11667 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11668 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11669 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11670 } while (0)
11671#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11672 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11673 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11674 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11675 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11676 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11677 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11678 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11679 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11680 } while (0)
11681
11682#ifndef IEM_WITH_SETJMP
11683# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11687# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11689#else
11690# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11691 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11692# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11693 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11694# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11695 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11696#endif
11697
11698#ifndef IEM_WITH_SETJMP
11699# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11703# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11705#else
11706# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11707 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11709 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11710# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712#endif
11713
11714#ifndef IEM_WITH_SETJMP
11715# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11719# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11721#else
11722# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11723 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11724# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11725 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11726# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728#endif
11729
11730#ifdef SOME_UNUSED_FUNCTION
11731# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11733#endif
11734
11735#ifndef IEM_WITH_SETJMP
11736# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11738# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11740# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11742# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11744#else
11745# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11746 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11748 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11749# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11752 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11753#endif
11754
11755#ifndef IEM_WITH_SETJMP
11756# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11762#else
11763# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11766 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11768 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11769#endif
11770
11771#ifndef IEM_WITH_SETJMP
11772# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11774# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11776#else
11777# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11778 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11779# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11780 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11781#endif
11782
11783#ifndef IEM_WITH_SETJMP
11784# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11786# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11788#else
11789# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11790 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11791# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11792 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11793#endif
11794
11795
11796
11797#ifndef IEM_WITH_SETJMP
11798# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11799 do { \
11800 uint8_t u8Tmp; \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11802 (a_u16Dst) = u8Tmp; \
11803 } while (0)
11804# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11805 do { \
11806 uint8_t u8Tmp; \
11807 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11808 (a_u32Dst) = u8Tmp; \
11809 } while (0)
11810# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 do { \
11812 uint8_t u8Tmp; \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11814 (a_u64Dst) = u8Tmp; \
11815 } while (0)
11816# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11817 do { \
11818 uint16_t u16Tmp; \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11820 (a_u32Dst) = u16Tmp; \
11821 } while (0)
11822# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11823 do { \
11824 uint16_t u16Tmp; \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11826 (a_u64Dst) = u16Tmp; \
11827 } while (0)
11828# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11829 do { \
11830 uint32_t u32Tmp; \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11832 (a_u64Dst) = u32Tmp; \
11833 } while (0)
11834#else /* IEM_WITH_SETJMP */
11835# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11838 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11839# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847#endif /* IEM_WITH_SETJMP */
11848
11849#ifndef IEM_WITH_SETJMP
11850# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11851 do { \
11852 uint8_t u8Tmp; \
11853 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11854 (a_u16Dst) = (int8_t)u8Tmp; \
11855 } while (0)
11856# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11857 do { \
11858 uint8_t u8Tmp; \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11860 (a_u32Dst) = (int8_t)u8Tmp; \
11861 } while (0)
11862# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11863 do { \
11864 uint8_t u8Tmp; \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11866 (a_u64Dst) = (int8_t)u8Tmp; \
11867 } while (0)
11868# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11869 do { \
11870 uint16_t u16Tmp; \
11871 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11872 (a_u32Dst) = (int16_t)u16Tmp; \
11873 } while (0)
11874# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11875 do { \
11876 uint16_t u16Tmp; \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11878 (a_u64Dst) = (int16_t)u16Tmp; \
11879 } while (0)
11880# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11881 do { \
11882 uint32_t u32Tmp; \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11884 (a_u64Dst) = (int32_t)u32Tmp; \
11885 } while (0)
11886#else /* IEM_WITH_SETJMP */
11887# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11888 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11889# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11890 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11891# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11892 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11893# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11894 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11895# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11896 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11897# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11898 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11899#endif /* IEM_WITH_SETJMP */
11900
11901#ifndef IEM_WITH_SETJMP
11902# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11904# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11906# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11908# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11910#else
11911# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11912 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11913# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11914 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11915# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11916 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11917# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11918 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11919#endif
11920
11921#ifndef IEM_WITH_SETJMP
11922# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11924# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11926# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11928# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11929 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11930#else
11931# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11932 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11933# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11934 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11935# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11936 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11937# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11938 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11939#endif
11940
11941#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11942#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11943#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11944#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11945#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11946#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11947#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11948 do { \
11949 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11950 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11951 } while (0)
11952
11953#ifndef IEM_WITH_SETJMP
11954# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11956# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11958#else
11959# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11960 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11961# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11962 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11963#endif
11964
11965#ifndef IEM_WITH_SETJMP
11966# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11968# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11969 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11970#else
11971# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11972 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11973# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11974 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11975#endif
11976
11977
11978#define IEM_MC_PUSH_U16(a_u16Value) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11980#define IEM_MC_PUSH_U32(a_u32Value) \
11981 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11982#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11984#define IEM_MC_PUSH_U64(a_u64Value) \
11985 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11986
11987#define IEM_MC_POP_U16(a_pu16Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11989#define IEM_MC_POP_U32(a_pu32Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11991#define IEM_MC_POP_U64(a_pu64Value) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11993
11994/** Maps guest memory for direct or bounce buffered access.
11995 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11996 * @remarks May return.
11997 */
11998#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11999 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12000
12001/** Maps guest memory for direct or bounce buffered access.
12002 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12003 * @remarks May return.
12004 */
12005#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12006 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12007
12008/** Commits the memory and unmaps the guest memory.
12009 * @remarks May return.
12010 */
12011#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12012 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12013
12014/** Commits the memory and unmaps the guest memory, unless the FPU status word
12015 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12016 * would cause FST not to store.
12017 *
12018 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12019 * store, while \#P will not.
12020 *
12021 * @remarks May in theory return - for now.
12022 */
12023#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12024 do { \
12025 if ( !(a_u16FSW & X86_FSW_ES) \
12026 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12027 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12028 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12029 } while (0)
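/* Illustrative note (not part of the original source): the FSW exception flags
 * IE/OE/UE occupy the same bit positions as the FCW mask bits IM/OM/UM, which
 * is why the unmasked-pending check above can be written as
 * "(FSW & (UE|OE|IE)) & ~(FCW & X86_FCW_MASK_ALL)".  For example, a worker
 * that returns a FSW with ES and IE set while FCW.IM is clear (unmasked \#IA)
 * causes the mapping not to be committed, so the memory operand keeps its old
 * value; with FCW.IM set the same FSW commits normally. */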
12030
12031/** Calculate efficient address from R/M. */
12032#ifndef IEM_WITH_SETJMP
12033# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12034 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12035#else
12036# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12037 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12038#endif
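/* Illustrative sketch (not part of the original source; assumes the IEM_MC_*
 * helpers defined earlier in this file, with iGReg standing in for the decoded
 * ModRM reg field): a memory-form decoder typically pairs the macro above with
 * one of the IEM_MC_FETCH_MEM_* macros, e.g.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEM_MC_FETCH_MEM_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(iGReg, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */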
12039
12040#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12041#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12042#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12043#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12044#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12045#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12046#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
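/* Illustrative sketch (not part of the original source; iGRegDst/iGRegSrc stand
 * in for the decoded ModRM fields): a register-form binary ALU instruction
 * feeds an assembly worker through IEM_MC_CALL_VOID_AIMPL_3 along these lines:
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t,   u32Src,  1);
 *     IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */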
12047
12048/**
12049 * Defers the rest of the instruction emulation to a C implementation routine
12050 * and returns, only taking the standard parameters.
12051 *
12052 * @param a_pfnCImpl The pointer to the C routine.
12053 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12054 */
12055#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12056
12057/**
12058 * Defers the rest of instruction emulation to a C implementation routine and
12059 * returns, taking one argument in addition to the standard ones.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @param a0 The argument.
12063 */
12064#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12065
12066/**
12067 * Defers the rest of the instruction emulation to a C implementation routine
12068 * and returns, taking two arguments in addition to the standard ones.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The first extra argument.
12072 * @param a1 The second extra argument.
12073 */
12074#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12075
12076/**
12077 * Defers the rest of the instruction emulation to a C implementation routine
12078 * and returns, taking three arguments in addition to the standard ones.
12079 *
12080 * @param a_pfnCImpl The pointer to the C routine.
12081 * @param a0 The first extra argument.
12082 * @param a1 The second extra argument.
12083 * @param a2 The third extra argument.
12084 */
12085#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12086
12087/**
12088 * Defers the rest of the instruction emulation to a C implementation routine
12089 * and returns, taking four arguments in addition to the standard ones.
12090 *
12091 * @param a_pfnCImpl The pointer to the C routine.
12092 * @param a0 The first extra argument.
12093 * @param a1 The second extra argument.
12094 * @param a2 The third extra argument.
12095 * @param a3 The fourth extra argument.
12096 */
12097#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12098
12099/**
12100 * Defers the rest of the instruction emulation to a C implementation routine
12101 * and returns, taking five arguments in addition to the standard ones.
12102 *
12103 * @param a_pfnCImpl The pointer to the C routine.
12104 * @param a0 The first extra argument.
12105 * @param a1 The second extra argument.
12106 * @param a2 The third extra argument.
12107 * @param a3 The fourth extra argument.
12108 * @param a4 The fifth extra argument.
12109 */
12110#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
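/* Illustrative sketch (not part of the original source; the worker name is a
 * placeholder): a decoder that has gathered its operands hands the rest of the
 * work to a C implementation, e.g.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint8_t, iEffSeg,  0);
 *     IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
 *     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEff);
 *     IEM_MC_END();
 *
 * Note that the IEM_MC_CALL_CIMPL_* macros expand to a return statement, so
 * nothing placed after them in the MC block executes.
 */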
12111
12112/**
12113 * Defers the entire instruction emulation to a C implementation routine and
12114 * returns, only taking the standard parameters.
12115 *
12116 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12117 *
12118 * @param a_pfnCImpl The pointer to the C routine.
12119 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12120 */
12121#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12122
12123/**
12124 * Defers the entire instruction emulation to a C implementation routine and
12125 * returns, taking one argument in addition to the standard ones.
12126 *
12127 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12128 *
12129 * @param a_pfnCImpl The pointer to the C routine.
12130 * @param a0 The argument.
12131 */
12132#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12133
12134/**
12135 * Defers the entire instruction emulation to a C implementation routine and
12136 * returns, taking two arguments in addition to the standard ones.
12137 *
12138 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12139 *
12140 * @param a_pfnCImpl The pointer to the C routine.
12141 * @param a0 The first extra argument.
12142 * @param a1 The second extra argument.
12143 */
12144#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12145
12146/**
12147 * Defers the entire instruction emulation to a C implementation routine and
12148 * returns, taking three arguments in addition to the standard ones.
12149 *
12150 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12151 *
12152 * @param a_pfnCImpl The pointer to the C routine.
12153 * @param a0 The first extra argument.
12154 * @param a1 The second extra argument.
12155 * @param a2 The third extra argument.
12156 */
12157#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
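/* Illustrative sketch (not part of the original source; the real opcode table
 * body may differ): instructions that need no MC block at all collapse to a
 * single deferral, e.g.
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_MNEMONIC(hlt, "hlt");
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */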
12158
12159/**
12160 * Calls a FPU assembly implementation taking one visible argument.
12161 *
12162 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12163 * @param a0 The first extra argument.
12164 */
12165#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12166 do { \
12167 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12168 } while (0)
12169
12170/**
12171 * Calls a FPU assembly implementation taking two visible arguments.
12172 *
12173 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12174 * @param a0 The first extra argument.
12175 * @param a1 The second extra argument.
12176 */
12177#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12178 do { \
12179 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12180 } while (0)
12181
12182/**
12183 * Calls a FPU assembly implementation taking three visible arguments.
12184 *
12185 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12186 * @param a0 The first extra argument.
12187 * @param a1 The second extra argument.
12188 * @param a2 The third extra argument.
12189 */
12190#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12191 do { \
12192 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12193 } while (0)
12194
12195#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12196 do { \
12197 (a_FpuData).FSW = (a_FSW); \
12198 (a_FpuData).r80Result = *(a_pr80Value); \
12199 } while (0)
12200
12201/** Pushes FPU result onto the stack. */
12202#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12203 iemFpuPushResult(pVCpu, &a_FpuData)
12204/** Pushes FPU result onto the stack and sets the FPUDP. */
12205#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12206 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12207
12208/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
12209#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12210 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12211
12212/** Stores FPU result in a stack register. */
12213#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12214 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12215/** Stores FPU result in a stack register and pops the stack. */
12216#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12217 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12218/** Stores FPU result in a stack register and sets the FPUDP. */
12219#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12220 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12221/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12222 * stack. */
12223#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12224 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
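/* Illustrative sketch (not part of the original source): the usual register
 * form flow is that the worker fills an IEMFPURESULT (FSW plus the 80-bit
 * result), which the decoder then commits with one of the store macros above:
 *
 *     IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     ...
 *     IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *     IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */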
12225
12226/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12227#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12228 iemFpuUpdateOpcodeAndIp(pVCpu)
12229/** Free a stack register (for FFREE and FFREEP). */
12230#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12231 iemFpuStackFree(pVCpu, a_iStReg)
12232/** Increment the FPU stack pointer. */
12233#define IEM_MC_FPU_STACK_INC_TOP() \
12234 iemFpuStackIncTop(pVCpu)
12235/** Decrement the FPU stack pointer. */
12236#define IEM_MC_FPU_STACK_DEC_TOP() \
12237 iemFpuStackDecTop(pVCpu)
12238
12239/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12240#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12241 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12242/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12243#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12244 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12245/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12246#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12247 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12248/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12249#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12250 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12251/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12252 * stack. */
12253#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12254 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12255/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12256#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12257 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12258
12259/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12260#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12261 iemFpuStackUnderflow(pVCpu, a_iStDst)
12262/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12263 * stack. */
12264#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12265 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12266/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12267 * FPUDS. */
12268#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12269 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12270/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12271 * FPUDS. Pops stack. */
12272#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12273 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12274/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12275 * stack twice. */
12276#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12277 iemFpuStackUnderflowThenPopPop(pVCpu)
12278/** Raises a FPU stack underflow exception for an instruction pushing a result
12279 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12280#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12281 iemFpuStackPushUnderflow(pVCpu)
12282/** Raises a FPU stack underflow exception for an instruction pushing a result
12283 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12284#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12285 iemFpuStackPushUnderflowTwo(pVCpu)
12286
12287/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12288 * FPUIP, FPUCS and FOP. */
12289#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12290 iemFpuStackPushOverflow(pVCpu)
12291/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12292 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12293#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12294 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12295/** Prepares for using the FPU state.
12296 * Ensures that we can use the host FPU in the current context (RC+R0).
12297 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12298#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12299/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12300#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12301/** Actualizes the guest FPU state so it can be accessed and modified. */
12302#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12303
12304/** Prepares for using the SSE state.
12305 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12306 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12307#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12308/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12309#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12310/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12311#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12312
12313/** Prepares for using the AVX state.
12314 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12315 * Ensures the guest AVX state in the CPUMCTX is up to date.
12316 * @note This will include the AVX512 state too when support for it is added
12317 * due to the zero-extending feature of VEX instructions. */
12318#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12319/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12320#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12321/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12322#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12323
12324/**
12325 * Calls a MMX assembly implementation taking two visible arguments.
12326 *
12327 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12328 * @param a0 The first extra argument.
12329 * @param a1 The second extra argument.
12330 */
12331#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12332 do { \
12333 IEM_MC_PREPARE_FPU_USAGE(); \
12334 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12335 } while (0)
12336
12337/**
12338 * Calls a MMX assembly implementation taking three visible arguments.
12339 *
12340 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12341 * @param a0 The first extra argument.
12342 * @param a1 The second extra argument.
12343 * @param a2 The third extra argument.
12344 */
12345#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12346 do { \
12347 IEM_MC_PREPARE_FPU_USAGE(); \
12348 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12349 } while (0)
12350
12351
12352/**
12353 * Calls a SSE assembly implementation taking two visible arguments.
12354 *
12355 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12356 * @param a0 The first extra argument.
12357 * @param a1 The second extra argument.
12358 */
12359#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12360 do { \
12361 IEM_MC_PREPARE_SSE_USAGE(); \
12362 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12363 } while (0)
12364
12365/**
12366 * Calls a SSE assembly implementation taking three visible arguments.
12367 *
12368 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12369 * @param a0 The first extra argument.
12370 * @param a1 The second extra argument.
12371 * @param a2 The third extra argument.
12372 */
12373#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12374 do { \
12375 IEM_MC_PREPARE_SSE_USAGE(); \
12376 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12377 } while (0)
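/* Illustrative sketch (not part of the original source; the worker name and the
 * iXRegDst/iXRegSrc indices are assumptions): a register-form SSE binary op
 * references the XMM registers and then calls the worker, e.g.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *     IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * IEM_MC_CALL_SSE_AIMPL_2 performs IEM_MC_PREPARE_SSE_USAGE() itself, so the
 * explicit call above only serves the register references that precede it.
 */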
12378
12379
12380/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12381 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12382#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12383 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12384
12385/**
12386 * Calls a AVX assembly implementation taking two visible arguments.
12387 *
12388 * There is one implicit zero'th argument, a pointer to the extended state.
12389 *
12390 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12391 * @param a1 The first extra argument.
12392 * @param a2 The second extra argument.
12393 */
12394#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12395 do { \
12396 IEM_MC_PREPARE_AVX_USAGE(); \
12397 a_pfnAImpl(pXState, (a1), (a2)); \
12398 } while (0)
12399
12400/**
12401 * Calls a AVX assembly implementation taking three visible arguments.
12402 *
12403 * There is one implicit zero'th argument, a pointer to the extended state.
12404 *
12405 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12406 * @param a1 The first extra argument.
12407 * @param a2 The second extra argument.
12408 * @param a3 The third extra argument.
12409 */
12410#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12411 do { \
12412 IEM_MC_PREPARE_AVX_USAGE(); \
12413 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12414 } while (0)
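/* Illustrative sketch (not part of the original source; the register accessors
 * and the worker name are placeholders): the implicit-argument macro declares
 * pXState as argument 0, so the visible arguments start at index 1:
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *     IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *     ...raise any pending AVX exceptions and reference the YMM operands...
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker, puDst, puSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */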
12415
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12426 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12427 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12428/** @note Not for IOPL or IF testing. */
12429#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12430 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12431 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12432/** @note Not for IOPL or IF testing. */
12433#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12434 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12435 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12436 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12437/** @note Not for IOPL or IF testing. */
12438#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12439 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12440 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12441 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12442#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12443#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12444#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12445/** @note Not for IOPL or IF testing. */
12446#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12447 if ( pVCpu->cpum.GstCtx.cx != 0 \
12448 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12449/** @note Not for IOPL or IF testing. */
12450#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12451 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12452 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12453/** @note Not for IOPL or IF testing. */
12454#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12455 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12456 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12457/** @note Not for IOPL or IF testing. */
12458#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12459 if ( pVCpu->cpum.GstCtx.cx != 0 \
12460 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12461/** @note Not for IOPL or IF testing. */
12462#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12463 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12464 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12465/** @note Not for IOPL or IF testing. */
12466#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12467 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12468 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12469#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12470#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12471
12472#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12473 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12474#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12475 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12476#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12477 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12478#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12479 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12480#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12481 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12482#define IEM_MC_IF_FCW_IM() \
12483 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12484
12485#define IEM_MC_ELSE() } else {
12486#define IEM_MC_ENDIF() } do {} while (0)
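/* Illustrative sketch (not part of the original source; iGRegDst/iGRegSrc stand
 * in for decoded register indices): the IEM_MC_IF_* macros open a brace that
 * IEM_MC_ELSE()/IEM_MC_ENDIF() close, so a cmovz-style register form reads
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_FETCH_GREG_U32(u32Value, iGRegSrc);
 *         IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);
 *     IEM_MC_ELSE()
 *         IEM_MC_CLEAR_HIGH_GREG_U64(iGRegDst);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */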
12487
12488/** @} */
12489
12490
12491/** @name Opcode Debug Helpers.
12492 * @{
12493 */
12494#ifdef VBOX_WITH_STATISTICS
12495# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12496#else
12497# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12498#endif
12499
12500#ifdef DEBUG
12501# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12502 do { \
12503 IEMOP_INC_STATS(a_Stats); \
12504 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12505 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12506 } while (0)
12507
12508# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12509 do { \
12510 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12511 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12512 (void)RT_CONCAT(OP_,a_Upper); \
12513 (void)(a_fDisHints); \
12514 (void)(a_fIemHints); \
12515 } while (0)
12516
12517# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12518 do { \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12520 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12521 (void)RT_CONCAT(OP_,a_Upper); \
12522 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12523 (void)(a_fDisHints); \
12524 (void)(a_fIemHints); \
12525 } while (0)
12526
12527# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12528 do { \
12529 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12530 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12531 (void)RT_CONCAT(OP_,a_Upper); \
12532 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12533 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12534 (void)(a_fDisHints); \
12535 (void)(a_fIemHints); \
12536 } while (0)
12537
12538# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12539 do { \
12540 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12541 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12542 (void)RT_CONCAT(OP_,a_Upper); \
12543 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12544 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12545 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12546 (void)(a_fDisHints); \
12547 (void)(a_fIemHints); \
12548 } while (0)
12549
12550# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12551 do { \
12552 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12553 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12554 (void)RT_CONCAT(OP_,a_Upper); \
12555 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12556 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12557 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12558 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12559 (void)(a_fDisHints); \
12560 (void)(a_fIemHints); \
12561 } while (0)
12562
12563#else
12564# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12565
12566# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12567 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12568# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12569 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12570# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12571 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12572# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12574# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12575 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12576
12577#endif
12578
12579#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12580 IEMOP_MNEMONIC0EX(a_Lower, \
12581 #a_Lower, \
12582 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12583#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12584 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12585 #a_Lower " " #a_Op1, \
12586 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12587#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12588 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12589 #a_Lower " " #a_Op1 "," #a_Op2, \
12590 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12591#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12592 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12593 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12594 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12595#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12596 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12597 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12598 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
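/* Illustrative sketch (not part of the original source; the form/operand tokens
 * and hint flags are picked as plausible values): a two-operand decoder would
 * open with something like
 *
 *     IEMOP_MNEMONIC2(RM, XOR, xor, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * which, per the wrappers above, expands to IEMOP_MNEMONIC2EX with the
 * statistics member xor_Gv_Ev and the mnemonic string "xor Gv,Ev".
 */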
12599
12600/** @} */
12601
12602
12603/** @name Opcode Helpers.
12604 * @{
12605 */
12606
12607#ifdef IN_RING3
12608# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12609 do { \
12610 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12611 else \
12612 { \
12613 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12614 return IEMOP_RAISE_INVALID_OPCODE(); \
12615 } \
12616 } while (0)
12617#else
12618# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12619 do { \
12620 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12621 else return IEMOP_RAISE_INVALID_OPCODE(); \
12622 } while (0)
12623#endif
12624
12625/** The instruction requires a 186 or later. */
12626#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12627# define IEMOP_HLP_MIN_186() do { } while (0)
12628#else
12629# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12630#endif
12631
12632/** The instruction requires a 286 or later. */
12633#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12634# define IEMOP_HLP_MIN_286() do { } while (0)
12635#else
12636# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12637#endif
12638
12639/** The instruction requires a 386 or later. */
12640#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12641# define IEMOP_HLP_MIN_386() do { } while (0)
12642#else
12643# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12644#endif
12645
12646/** The instruction requires a 386 or later if the given expression is true. */
12647#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12648# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12649#else
12650# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12651#endif
12652
12653/** The instruction requires a 486 or later. */
12654#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12655# define IEMOP_HLP_MIN_486() do { } while (0)
12656#else
12657# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12658#endif
12659
12660/** The instruction requires a Pentium (586) or later. */
12661#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12662# define IEMOP_HLP_MIN_586() do { } while (0)
12663#else
12664# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12665#endif
12666
12667/** The instruction requires a PentiumPro (686) or later. */
12668#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12669# define IEMOP_HLP_MIN_686() do { } while (0)
12670#else
12671# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12672#endif
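/* Illustrative sketch (not part of the original source; the opcode function
 * name is picked for illustration): the minimum-CPU helpers go right after the
 * mnemonic logging in the opcode handler, e.g.
 *
 *     FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
 *     {
 *         IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *         IEMOP_HLP_MIN_386();
 *         ...
 *     }
 *
 * When IEM_CFG_TARGET_CPU is already at or above the requested level the helper
 * compiles to nothing; otherwise it raises \#UD on older target CPUs.
 */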
12673
12674
12675/** The instruction raises an \#UD in real and V8086 mode. */
12676#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12677 do \
12678 { \
12679 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12680 else return IEMOP_RAISE_INVALID_OPCODE(); \
12681 } while (0)
12682
12683#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12684/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12685 * without a 64-bit code segment (applicable to all VMX instructions
12686 * except VMCALL).
12687 */
12688#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12689 do \
12690 { \
12691 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12692 && ( !IEM_IS_LONG_MODE(pVCpu) \
12693 || IEM_IS_64BIT_CODE(pVCpu))) \
12694 { /* likely */ } \
12695 else \
12696 { \
12697 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12698 { \
12699 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12700 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12701 return IEMOP_RAISE_INVALID_OPCODE(); \
12702 } \
12703 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12704 { \
12705 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12706 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12707 return IEMOP_RAISE_INVALID_OPCODE(); \
12708 } \
12709 } \
12710 } while (0)
12711
12712/** The instruction can only be executed in VMX operation (VMX root mode and
12713 * non-root mode).
12714 *
12715 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12716 */
12717# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12718 do \
12719 { \
12720 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12721 else \
12722 { \
12723 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12724 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12725 return IEMOP_RAISE_INVALID_OPCODE(); \
12726 } \
12727 } while (0)
12728#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12729
12730/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12731 * 64-bit mode. */
12732#define IEMOP_HLP_NO_64BIT() \
12733 do \
12734 { \
12735 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12736 return IEMOP_RAISE_INVALID_OPCODE(); \
12737 } while (0)
12738
12739/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12740 * 64-bit mode. */
12741#define IEMOP_HLP_ONLY_64BIT() \
12742 do \
12743 { \
12744 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12745 return IEMOP_RAISE_INVALID_OPCODE(); \
12746 } while (0)
12747
12748/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12749#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12750 do \
12751 { \
12752 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12753 iemRecalEffOpSize64Default(pVCpu); \
12754 } while (0)
12755
12756/** The instruction has 64-bit operand size if 64-bit mode. */
12757#define IEMOP_HLP_64BIT_OP_SIZE() \
12758 do \
12759 { \
12760 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12761 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12762 } while (0)
12763
12764/** Only a REX prefix immediately preceding the first opcode byte takes
12765 * effect. This macro helps ensure that, as well as logging bad guest code. */
12766#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12767 do \
12768 { \
12769 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12770 { \
12771 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12772 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12773 pVCpu->iem.s.uRexB = 0; \
12774 pVCpu->iem.s.uRexIndex = 0; \
12775 pVCpu->iem.s.uRexReg = 0; \
12776 iemRecalEffOpSize(pVCpu); \
12777 } \
12778 } while (0)
12779
12780/**
12781 * Done decoding.
12782 */
12783#define IEMOP_HLP_DONE_DECODING() \
12784 do \
12785 { \
12786 /*nothing for now, maybe later... */ \
12787 } while (0)
12788
12789/**
12790 * Done decoding, raise \#UD exception if lock prefix present.
12791 */
12792#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12793 do \
12794 { \
12795 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12796 { /* likely */ } \
12797 else \
12798 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12799 } while (0)
12800
12801
12802/**
12803 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12804 * repnz or size prefixes are present, or if in real or v8086 mode.
12805 */
12806#define IEMOP_HLP_DONE_VEX_DECODING() \
12807 do \
12808 { \
12809 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12810 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12811 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12812 { /* likely */ } \
12813 else \
12814 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12815 } while (0)
12816
12817/**
12818 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12819 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L!=0.
12820 */
12821#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12822 do \
12823 { \
12824 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12825 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12826 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12827 && pVCpu->iem.s.uVexLength == 0)) \
12828 { /* likely */ } \
12829 else \
12830 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12831 } while (0)
12832
12833
12834/**
12835 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12836 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12837 * register 0, or if in real or v8086 mode.
12838 */
12839#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12840 do \
12841 { \
12842 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12843 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12844 && !pVCpu->iem.s.uVex3rdReg \
12845 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12846 { /* likely */ } \
12847 else \
12848 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12849 } while (0)
12850
12851/**
12852 * Done decoding VEX, no V, L=0.
12853 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12854 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12855 */
12856#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12857 do \
12858 { \
12859 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12860 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12861 && pVCpu->iem.s.uVexLength == 0 \
12862 && pVCpu->iem.s.uVex3rdReg == 0 \
12863 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12864 { /* likely */ } \
12865 else \
12866 return IEMOP_RAISE_INVALID_OPCODE(); \
12867 } while (0)
12868
12869#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12870 do \
12871 { \
12872 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12873 { /* likely */ } \
12874 else \
12875 { \
12876 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12877 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12878 } \
12879 } while (0)
12880#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12881 do \
12882 { \
12883 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12884 { /* likely */ } \
12885 else \
12886 { \
12887 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12888 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12889 } \
12890 } while (0)
12891
12892/**
12893 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12894 * are present.
12895 */
12896#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12897 do \
12898 { \
12899 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12900 { /* likely */ } \
12901 else \
12902 return IEMOP_RAISE_INVALID_OPCODE(); \
12903 } while (0)
12904
12905/**
12906 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12907 * prefixes are present.
12908 */
12909#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12910 do \
12911 { \
12912 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12913 { /* likely */ } \
12914 else \
12915 return IEMOP_RAISE_INVALID_OPCODE(); \
12916 } while (0)
12917
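/* Illustrative sketch (not part of the original source): the prefix checks
 * above sit between operand decoding and the MC block, e.g.
 *
 *     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_BEGIN(0, 1);
 *     ...
 *     IEM_MC_END();
 */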
12918
12919/**
12920 * Calculates the effective address of a ModR/M memory operand.
12921 *
12922 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12923 *
12924 * @return Strict VBox status code.
12925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12926 * @param bRm The ModRM byte.
12927 * @param cbImm The size of any immediate following the
12928 * effective address opcode bytes. Important for
12929 * RIP relative addressing.
12930 * @param pGCPtrEff Where to return the effective address.
12931 */
12932IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12933{
12934 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12935# define SET_SS_DEF() \
12936 do \
12937 { \
12938 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12939 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12940 } while (0)
12941
12942 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12943 {
12944/** @todo Check the effective address size crap! */
12945 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12946 {
12947 uint16_t u16EffAddr;
12948
12949 /* Handle the disp16 form with no registers first. */
12950 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12951 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12952 else
12953 {
12954 /* Get the displacement. */
12955 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12956 {
12957 case 0: u16EffAddr = 0; break;
12958 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12959 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12960 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12961 }
12962
12963 /* Add the base and index registers to the disp. */
12964 switch (bRm & X86_MODRM_RM_MASK)
12965 {
12966 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12967 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12968 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12969 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12970 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12971 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12972 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12973 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12974 }
12975 }
12976
12977 *pGCPtrEff = u16EffAddr;
12978 }
12979 else
12980 {
12981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12982 uint32_t u32EffAddr;
12983
12984 /* Handle the disp32 form with no registers first. */
12985 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12986 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12987 else
12988 {
12989 /* Get the register (or SIB) value. */
12990 switch ((bRm & X86_MODRM_RM_MASK))
12991 {
12992 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12993 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12994 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12995 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12996 case 4: /* SIB */
12997 {
12998 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12999
13000 /* Get the index and scale it. */
13001 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13002 {
13003 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13004 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13005 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13006 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13007 case 4: u32EffAddr = 0; /*none */ break;
13008 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13009 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13010 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13011 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13012 }
13013 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13014
13015 /* add base */
13016 switch (bSib & X86_SIB_BASE_MASK)
13017 {
13018 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13019 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13020 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13021 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13022 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13023 case 5:
13024 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13025 {
13026 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13027 SET_SS_DEF();
13028 }
13029 else
13030 {
13031 uint32_t u32Disp;
13032 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13033 u32EffAddr += u32Disp;
13034 }
13035 break;
13036 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13037 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13039 }
13040 break;
13041 }
13042 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13043 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13044 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13045 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13046 }
13047
13048 /* Get and add the displacement. */
13049 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13050 {
13051 case 0:
13052 break;
13053 case 1:
13054 {
13055 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13056 u32EffAddr += i8Disp;
13057 break;
13058 }
13059 case 2:
13060 {
13061 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13062 u32EffAddr += u32Disp;
13063 break;
13064 }
13065 default:
13066 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13067 }
13068
13069 }
13070 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13071 *pGCPtrEff = u32EffAddr;
13072 else
13073 {
13074 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13075 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13076 }
13077 }
13078 }
13079 else
13080 {
13081 uint64_t u64EffAddr;
13082
13083 /* Handle the rip+disp32 form with no registers first. */
13084 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13085 {
13086 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13087 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13088 }
13089 else
13090 {
13091 /* Get the register (or SIB) value. */
13092 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13093 {
13094 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13095 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13096 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13097 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13098 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13099 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13100 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13101 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13102 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13103 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13104 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13105 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13106 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13107 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13108 /* SIB */
13109 case 4:
13110 case 12:
13111 {
13112 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13113
13114 /* Get the index and scale it. */
13115 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13116 {
13117 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13118 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13119 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13120 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13121 case 4: u64EffAddr = 0; /*none */ break;
13122 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13123 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13124 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13125 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13126 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13127 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13128 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13129 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13130 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13131 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13132 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13133 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13134 }
13135 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13136
13137 /* add base */
13138 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13139 {
13140 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13141 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13142 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13143 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13144 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13145 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13146 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13147 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13148 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13149 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13150 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13151 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13152 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13153 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13154 /* complicated encodings */
13155 case 5:
13156 case 13:
13157 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13158 {
13159 if (!pVCpu->iem.s.uRexB)
13160 {
13161 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13162 SET_SS_DEF();
13163 }
13164 else
13165 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13166 }
13167 else
13168 {
13169 uint32_t u32Disp;
13170 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13171 u64EffAddr += (int32_t)u32Disp;
13172 }
13173 break;
13174 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13175 }
13176 break;
13177 }
13178 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13179 }
13180
13181 /* Get and add the displacement. */
13182 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13183 {
13184 case 0:
13185 break;
13186 case 1:
13187 {
13188 int8_t i8Disp;
13189 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13190 u64EffAddr += i8Disp;
13191 break;
13192 }
13193 case 2:
13194 {
13195 uint32_t u32Disp;
13196 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13197 u64EffAddr += (int32_t)u32Disp;
13198 break;
13199 }
13200 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13201 }
13202
13203 }
13204
13205 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13206 *pGCPtrEff = u64EffAddr;
13207 else
13208 {
13209 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13210 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13211 }
13212 }
13213
13214 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13215 return VINF_SUCCESS;
13216}
13217
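/*
 * Editorial worked example for the 32-bit decoding above (the byte values are
 * made up for exposition, not taken from any guest code):
 *   ModRM = 0x44 -> mod=01 (disp8 follows), reg=000, rm=100 (SIB byte follows)
 *   SIB   = 0x9d -> scale=4, index=EBX, base=EBP
 *   disp8 = 0x08
 * The switches above then yield u32EffAddr = EBX*4 + EBP + 8, and since the
 * base is EBP, SET_SS_DEF() makes SS the default segment unless an explicit
 * segment prefix was decoded.
 */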
13218
13219/**
13220 * Calculates the effective address of a ModR/M memory operand.
13221 *
13222 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13223 *
13224 * @return Strict VBox status code.
13225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13226 * @param bRm The ModRM byte.
13227 * @param cbImm The size of any immediate following the
13228 * effective address opcode bytes. Important for
13229 * RIP relative addressing.
13230 * @param pGCPtrEff Where to return the effective address.
13231 * @param offRsp RSP displacement.
13232 */
13233IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13234{
13235 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13236# define SET_SS_DEF() \
13237 do \
13238 { \
13239 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13240 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13241 } while (0)
13242
13243 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13244 {
13245/** @todo Check the effective address size crap! */
13246 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13247 {
13248 uint16_t u16EffAddr;
13249
13250 /* Handle the disp16 form with no registers first. */
13251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13252 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13253 else
13254 {
13255 /* Get the displacement. */
13256 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13257 {
13258 case 0: u16EffAddr = 0; break;
13259 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13260 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13261 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13262 }
13263
13264 /* Add the base and index registers to the disp. */
13265 switch (bRm & X86_MODRM_RM_MASK)
13266 {
13267 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13268 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13269 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13270 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13271 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13272 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13273 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13274 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13275 }
13276 }
13277
13278 *pGCPtrEff = u16EffAddr;
13279 }
13280 else
13281 {
13282 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13283 uint32_t u32EffAddr;
13284
13285 /* Handle the disp32 form with no registers first. */
13286 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13287 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13288 else
13289 {
13290 /* Get the register (or SIB) value. */
13291 switch ((bRm & X86_MODRM_RM_MASK))
13292 {
13293 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13294 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13295 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13296 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13297 case 4: /* SIB */
13298 {
13299 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13300
13301 /* Get the index and scale it. */
13302 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13303 {
13304 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13305 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13306 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13307 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13308 case 4: u32EffAddr = 0; /*none */ break;
13309 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13310 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13311 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13312 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13313 }
13314 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13315
13316 /* add base */
13317 switch (bSib & X86_SIB_BASE_MASK)
13318 {
13319 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13320 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13321 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13322 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13323 case 4:
13324 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13325 SET_SS_DEF();
13326 break;
13327 case 5:
13328 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13329 {
13330 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13331 SET_SS_DEF();
13332 }
13333 else
13334 {
13335 uint32_t u32Disp;
13336 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13337 u32EffAddr += u32Disp;
13338 }
13339 break;
13340 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13341 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13343 }
13344 break;
13345 }
13346 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13347 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13348 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13350 }
13351
13352 /* Get and add the displacement. */
13353 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13354 {
13355 case 0:
13356 break;
13357 case 1:
13358 {
13359 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13360 u32EffAddr += i8Disp;
13361 break;
13362 }
13363 case 2:
13364 {
13365 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13366 u32EffAddr += u32Disp;
13367 break;
13368 }
13369 default:
13370 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13371 }
13372
13373 }
13374 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13375 *pGCPtrEff = u32EffAddr;
13376 else
13377 {
13378 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13379 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13380 }
13381 }
13382 }
13383 else
13384 {
13385 uint64_t u64EffAddr;
13386
13387 /* Handle the rip+disp32 form with no registers first. */
13388 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13389 {
13390 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13391 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13392 }
13393 else
13394 {
13395 /* Get the register (or SIB) value. */
13396 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13397 {
13398 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13399 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13400 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13401 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13402 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13403 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13404 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13405 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13406 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13407 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13408 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13409 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13410 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13411 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13412 /* SIB */
13413 case 4:
13414 case 12:
13415 {
13416 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13417
13418 /* Get the index and scale it. */
13419 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13420 {
13421 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13422 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13423 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13424 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13425 case 4: u64EffAddr = 0; /*none */ break;
13426 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13427 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13428 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13429 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13430 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13431 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13432 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13433 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13434 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13435 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13436 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13437 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13438 }
13439 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13440
13441 /* add base */
13442 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13443 {
13444 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13445 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13446 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13447 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13448 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13449 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13450 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13451 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13452 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13453 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13454 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13455 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13456 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13457 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13458 /* complicated encodings */
13459 case 5:
13460 case 13:
13461 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13462 {
13463 if (!pVCpu->iem.s.uRexB)
13464 {
13465 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13466 SET_SS_DEF();
13467 }
13468 else
13469 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13470 }
13471 else
13472 {
13473 uint32_t u32Disp;
13474 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13475 u64EffAddr += (int32_t)u32Disp;
13476 }
13477 break;
13478 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13479 }
13480 break;
13481 }
13482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13483 }
13484
13485 /* Get and add the displacement. */
13486 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13487 {
13488 case 0:
13489 break;
13490 case 1:
13491 {
13492 int8_t i8Disp;
13493 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13494 u64EffAddr += i8Disp;
13495 break;
13496 }
13497 case 2:
13498 {
13499 uint32_t u32Disp;
13500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13501 u64EffAddr += (int32_t)u32Disp;
13502 break;
13503 }
13504 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13505 }
13506
13507 }
13508
13509 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13510 *pGCPtrEff = u64EffAddr;
13511 else
13512 {
13513 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13514 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13515 }
13516 }
13517
13518 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13519 return VINF_SUCCESS;
13520}
13521
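/*
 * Editorial note on the Ex variant above: the only behavioural difference from
 * iemOpHlpCalcRmEffAddr is that offRsp is added whenever xSP is used as the
 * SIB base register.  For instance, with offRsp = -8 an [rsp+disp8] operand is
 * resolved as if RSP had already been lowered by 8.  Why a caller wants that
 * (presumably an instruction adjusting the stack before the memory access) is
 * an assumption on the editor's part, not something stated in this file.
 */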
13522
13523#ifdef IEM_WITH_SETJMP
13524/**
13525 * Calculates the effective address of a ModR/M memory operand.
13526 *
13527 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13528 *
13529 * May longjmp on internal error.
13530 *
13531 * @return The effective address.
13532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13533 * @param bRm The ModRM byte.
13534 * @param cbImm The size of any immediate following the
13535 * effective address opcode bytes. Important for
13536 * RIP relative addressing.
13537 */
13538IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13539{
13540 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13541# define SET_SS_DEF() \
13542 do \
13543 { \
13544 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13545 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13546 } while (0)
13547
13548 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13549 {
13550/** @todo Check the effective address size crap! */
13551 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13552 {
13553 uint16_t u16EffAddr;
13554
13555 /* Handle the disp16 form with no registers first. */
13556 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13557 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13558 else
13559 {
13560 /* Get the displacement. */
13561 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13562 {
13563 case 0: u16EffAddr = 0; break;
13564 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13565 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13566 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13567 }
13568
13569 /* Add the base and index registers to the disp. */
13570 switch (bRm & X86_MODRM_RM_MASK)
13571 {
13572 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13573 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13574 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13575 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13576 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13577 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13578 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13579 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13580 }
13581 }
13582
13583 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13584 return u16EffAddr;
13585 }
13586
13587 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13588 uint32_t u32EffAddr;
13589
13590 /* Handle the disp32 form with no registers first. */
13591 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13592 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13593 else
13594 {
13595 /* Get the register (or SIB) value. */
13596 switch ((bRm & X86_MODRM_RM_MASK))
13597 {
13598 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13599 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13600 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13601 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13602 case 4: /* SIB */
13603 {
13604 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13605
13606 /* Get the index and scale it. */
13607 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13608 {
13609 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13610 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13611 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13612 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13613 case 4: u32EffAddr = 0; /*none */ break;
13614 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13615 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13616 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13617 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13618 }
13619 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13620
13621 /* add base */
13622 switch (bSib & X86_SIB_BASE_MASK)
13623 {
13624 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13625 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13626 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13627 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13628 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13629 case 5:
13630 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13631 {
13632 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13633 SET_SS_DEF();
13634 }
13635 else
13636 {
13637 uint32_t u32Disp;
13638 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13639 u32EffAddr += u32Disp;
13640 }
13641 break;
13642 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13643 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13644 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13645 }
13646 break;
13647 }
13648 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13649 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13650 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13651 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13652 }
13653
13654 /* Get and add the displacement. */
13655 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13656 {
13657 case 0:
13658 break;
13659 case 1:
13660 {
13661 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13662 u32EffAddr += i8Disp;
13663 break;
13664 }
13665 case 2:
13666 {
13667 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13668 u32EffAddr += u32Disp;
13669 break;
13670 }
13671 default:
13672 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13673 }
13674 }
13675
13676 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13677 {
13678 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13679 return u32EffAddr;
13680 }
13681 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13682 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13683 return u32EffAddr & UINT16_MAX;
13684 }
13685
13686 uint64_t u64EffAddr;
13687
13688 /* Handle the rip+disp32 form with no registers first. */
13689 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13690 {
13691 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13692 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13693 }
13694 else
13695 {
13696 /* Get the register (or SIB) value. */
13697 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13698 {
13699 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13700 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13701 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13702 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13703 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13704 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13705 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13706 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13707 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13708 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13709 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13710 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13711 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13712 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13713 /* SIB */
13714 case 4:
13715 case 12:
13716 {
13717 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13718
13719 /* Get the index and scale it. */
13720 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13721 {
13722 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13723 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13724 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13725 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13726 case 4: u64EffAddr = 0; /*none */ break;
13727 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13728 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13729 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13730 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13731 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13732 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13733 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13734 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13735 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13736 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13737 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13738 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13739 }
13740 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13741
13742 /* add base */
13743 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13744 {
13745 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13746 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13747 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13748 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13749 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13750 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13751 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13752 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13753 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13754 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13755 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13756 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13757 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13758 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13759 /* complicated encodings */
13760 case 5:
13761 case 13:
13762 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13763 {
13764 if (!pVCpu->iem.s.uRexB)
13765 {
13766 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13767 SET_SS_DEF();
13768 }
13769 else
13770 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13771 }
13772 else
13773 {
13774 uint32_t u32Disp;
13775 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13776 u64EffAddr += (int32_t)u32Disp;
13777 }
13778 break;
13779 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13780 }
13781 break;
13782 }
13783 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13784 }
13785
13786 /* Get and add the displacement. */
13787 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13788 {
13789 case 0:
13790 break;
13791 case 1:
13792 {
13793 int8_t i8Disp;
13794 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13795 u64EffAddr += i8Disp;
13796 break;
13797 }
13798 case 2:
13799 {
13800 uint32_t u32Disp;
13801 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13802 u64EffAddr += (int32_t)u32Disp;
13803 break;
13804 }
13805 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13806 }
13807
13808 }
13809
13810 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13811 {
13812 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13813 return u64EffAddr;
13814 }
13815 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13816 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13817 return u64EffAddr & UINT32_MAX;
13818}
13819#endif /* IEM_WITH_SETJMP */
13820
13821/** @} */
13822
13823
13824
13825/*
13826 * Include the instructions
13827 */
13828#include "IEMAllInstructions.cpp.h"
13829
13830
13831
13832#ifdef LOG_ENABLED
13833/**
13834 * Logs the current instruction.
13835 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13836 * @param fSameCtx Set if we have the same context information as the VMM,
13837 * clear if we may have already executed an instruction in
13838 * our debug context. When clear, we assume IEMCPU holds
13839 * valid CPU mode info.
13840 *
13841 * The @a fSameCtx parameter is now misleading and obsolete.
13842 * @param pszFunction The IEM function doing the execution.
13843 */
13844IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13845{
13846# ifdef IN_RING3
13847 if (LogIs2Enabled())
13848 {
13849 char szInstr[256];
13850 uint32_t cbInstr = 0;
13851 if (fSameCtx)
13852 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13853 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13854 szInstr, sizeof(szInstr), &cbInstr);
13855 else
13856 {
13857 uint32_t fFlags = 0;
13858 switch (pVCpu->iem.s.enmCpuMode)
13859 {
13860 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13861 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13862 case IEMMODE_16BIT:
13863 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13864 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13865 else
13866 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13867 break;
13868 }
13869 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13870 szInstr, sizeof(szInstr), &cbInstr);
13871 }
13872
13873 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13874 Log2(("**** %s\n"
13875 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13876 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13877 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13878 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13879 " %s\n"
13880 , pszFunction,
13881 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13882 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13883 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13884 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13885 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13886 szInstr));
13887
13888 if (LogIs3Enabled())
13889 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13890 }
13891 else
13892# endif
13893 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13894 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13895 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13896}
13897#endif /* LOG_ENABLED */
13898
13899
13900/**
13901 * Makes status code adjustments (pass up from I/O and access handlers)
13902 * as well as maintaining statistics.
13903 *
13904 * @returns Strict VBox status code to pass up.
13905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13906 * @param rcStrict The status from executing an instruction.
13907 */
13908DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13909{
13910 if (rcStrict != VINF_SUCCESS)
13911 {
13912 if (RT_SUCCESS(rcStrict))
13913 {
13914 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13915 || rcStrict == VINF_IOM_R3_IOPORT_READ
13916 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13917 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13918 || rcStrict == VINF_IOM_R3_MMIO_READ
13919 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13920 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13921 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13922 || rcStrict == VINF_CPUM_R3_MSR_READ
13923 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13924 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13925 || rcStrict == VINF_EM_RAW_TO_R3
13926 || rcStrict == VINF_EM_TRIPLE_FAULT
13927 || rcStrict == VINF_GIM_R3_HYPERCALL
13928 /* raw-mode / virt handlers only: */
13929 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13930 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13931 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13932 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13933 || rcStrict == VINF_SELM_SYNC_GDT
13934 || rcStrict == VINF_CSAM_PENDING_ACTION
13935 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13936 /* nested hw.virt codes: */
13937 || rcStrict == VINF_VMX_VMEXIT
13938 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13939 || rcStrict == VINF_SVM_VMEXIT
13940 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13941/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13942 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13943#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13944 if ( rcStrict == VINF_VMX_VMEXIT
13945 && rcPassUp == VINF_SUCCESS)
13946 rcStrict = VINF_SUCCESS;
13947 else
13948#endif
13949#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13950 if ( rcStrict == VINF_SVM_VMEXIT
13951 && rcPassUp == VINF_SUCCESS)
13952 rcStrict = VINF_SUCCESS;
13953 else
13954#endif
13955 if (rcPassUp == VINF_SUCCESS)
13956 pVCpu->iem.s.cRetInfStatuses++;
13957 else if ( rcPassUp < VINF_EM_FIRST
13958 || rcPassUp > VINF_EM_LAST
13959 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13960 {
13961 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13962 pVCpu->iem.s.cRetPassUpStatus++;
13963 rcStrict = rcPassUp;
13964 }
13965 else
13966 {
13967 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13968 pVCpu->iem.s.cRetInfStatuses++;
13969 }
13970 }
13971 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13972 pVCpu->iem.s.cRetAspectNotImplemented++;
13973 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13974 pVCpu->iem.s.cRetInstrNotImplemented++;
13975 else
13976 pVCpu->iem.s.cRetErrStatuses++;
13977 }
13978 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13979 {
13980 pVCpu->iem.s.cRetPassUpStatus++;
13981 rcStrict = pVCpu->iem.s.rcPassUp;
13982 }
13983
13984 return rcStrict;
13985}
13986
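/*
 * Editorial example of the pass-up rule in iemExecStatusCodeFiddling above,
 * assuming the usual VBox ordering where VINF_EM_* informational codes are
 * numerically lower than the VINF_IOM_* ones: if an instruction returned
 * VINF_IOM_R3_IOPORT_READ while a handler left VINF_EM_RAW_TO_R3 in rcPassUp,
 * the latter is returned and cRetPassUpStatus is bumped; with rcPassUp equal
 * to VINF_SUCCESS the original informational status is kept and
 * cRetInfStatuses is bumped instead.
 */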
13987
13988/**
13989 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13990 * IEMExecOneWithPrefetchedByPC.
13991 *
13992 * Similar code is found in IEMExecLots.
13993 *
13994 * @return Strict VBox status code.
13995 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13996 * @param fExecuteInhibit If set, execute the instruction following CLI,
13997 * POP SS and MOV SS,GR.
13998 * @param pszFunction The calling function name.
13999 */
14000DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14001{
14002 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14003 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14004 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14005 RT_NOREF_PV(pszFunction);
14006
14007#ifdef IEM_WITH_SETJMP
14008 VBOXSTRICTRC rcStrict;
14009 jmp_buf JmpBuf;
14010 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14011 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14012 if ((rcStrict = setjmp(JmpBuf)) == 0)
14013 {
14014 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14015 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14016 }
14017 else
14018 pVCpu->iem.s.cLongJumps++;
14019 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14020#else
14021 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14022 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14023#endif
14024 if (rcStrict == VINF_SUCCESS)
14025 pVCpu->iem.s.cInstructions++;
14026 if (pVCpu->iem.s.cActiveMappings > 0)
14027 {
14028 Assert(rcStrict != VINF_SUCCESS);
14029 iemMemRollback(pVCpu);
14030 }
14031 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14032 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14033 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14034
14035//#ifdef DEBUG
14036// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14037//#endif
14038
14039#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14040 /*
14041 * Perform any VMX nested-guest instruction boundary actions.
14042 *
14043 * If any of these causes a VM-exit, we must skip executing the next
14044 * instruction (would run into stale page tables). A VM-exit makes sure
14045 * there is no interrupt inhibition, which ensures that we will not go on
14046 * to execute the next instruction. Clearing fExecuteInhibit directly is
14047 * problematic because of the setjmp/longjmp clobbering above.
14048 */
14049 if ( rcStrict == VINF_SUCCESS
14050 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14051 {
14052 /* TPR-below threshold/APIC write has the highest priority. */
14053 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14054 {
14055 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14056 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14057 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14058 }
14059 /* MTF takes priority over VMX-preemption timer. */
14060 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14061 {
14062 rcStrict = iemVmxVmexitMtf(pVCpu);
14063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14065 }
14066 /* VMX preemption timer takes priority over NMI-window exits. */
14067 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14068 {
14069 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14070 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14071 rcStrict = VINF_SUCCESS;
14072 else
14073 {
14074 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14075 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14076 }
14077 }
14078 /* NMI-window VM-exit. */
14079 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))
14080 {
14081 rcStrict = iemVmxVmexitNmiWindow(pVCpu);
14082 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14083 }
14084 }
14085#endif
14086
14087 /* Execute the next instruction as well if a cli, pop ss or
14088 mov ss, Gr has just completed successfully. */
14089 if ( fExecuteInhibit
14090 && rcStrict == VINF_SUCCESS
14091 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14092 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14093 {
14094 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14095 if (rcStrict == VINF_SUCCESS)
14096 {
14097#ifdef LOG_ENABLED
14098 iemLogCurInstr(pVCpu, false, pszFunction);
14099#endif
14100#ifdef IEM_WITH_SETJMP
14101 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14102 if ((rcStrict = setjmp(JmpBuf)) == 0)
14103 {
14104 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14105 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14106 }
14107 else
14108 pVCpu->iem.s.cLongJumps++;
14109 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14110#else
14111 IEM_OPCODE_GET_NEXT_U8(&b);
14112 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14113#endif
14114 if (rcStrict == VINF_SUCCESS)
14115 pVCpu->iem.s.cInstructions++;
14116 if (pVCpu->iem.s.cActiveMappings > 0)
14117 {
14118 Assert(rcStrict != VINF_SUCCESS);
14119 iemMemRollback(pVCpu);
14120 }
14121 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14122 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14123 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14124 }
14125 else if (pVCpu->iem.s.cActiveMappings > 0)
14126 iemMemRollback(pVCpu);
14127 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14128 }
14129
14130 /*
14131 * Return value fiddling, statistics and sanity assertions.
14132 */
14133 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14134
14135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14137 return rcStrict;
14138}
14139
14140
14141#ifdef IN_RC
14142/**
14143 * Re-enters raw-mode or ensure we return to ring-3.
14144 *
14145 * @returns rcStrict, maybe modified.
14146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14147 * @param rcStrict The status code returned by the interpreter.
14148 */
14149DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14150{
14151 if ( !pVCpu->iem.s.fInPatchCode
14152 && ( rcStrict == VINF_SUCCESS
14153 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14154 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14155 {
14156 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14157 CPUMRawEnter(pVCpu);
14158 else
14159 {
14160 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14161 rcStrict = VINF_EM_RESCHEDULE;
14162 }
14163 }
14164 return rcStrict;
14165}
14166#endif
14167
14168
14169/**
14170 * Execute one instruction.
14171 *
14172 * @return Strict VBox status code.
14173 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14174 */
14175VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14176{
14177#ifdef LOG_ENABLED
14178 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14179#endif
14180
14181 /*
14182 * Do the decoding and emulation.
14183 */
14184 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14185 if (rcStrict == VINF_SUCCESS)
14186 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14187 else if (pVCpu->iem.s.cActiveMappings > 0)
14188 iemMemRollback(pVCpu);
14189
14190#ifdef IN_RC
14191 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14192#endif
14193 if (rcStrict != VINF_SUCCESS)
14194 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14195 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14196 return rcStrict;
14197}
14198
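/*
 * Editorial usage sketch for IEMExecOne (not lifted from EM): a typical ring-3
 * caller executes a single instruction and hands any strict status back up for
 * the execution manager to act on, e.g.:
 *
 *   VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *   if (rcStrict != VINF_SUCCESS)
 *       return VBOXSTRICTRC_TODO(rcStrict); // let the caller reschedule or handle it
 */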
14199
14200VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14201{
14202 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14203
14204 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14205 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14206 if (rcStrict == VINF_SUCCESS)
14207 {
14208 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14209 if (pcbWritten)
14210 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14211 }
14212 else if (pVCpu->iem.s.cActiveMappings > 0)
14213 iemMemRollback(pVCpu);
14214
14215#ifdef IN_RC
14216 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14217#endif
14218 return rcStrict;
14219}
14220
14221
14222VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14223 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14224{
14225 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14226
14227 VBOXSTRICTRC rcStrict;
14228 if ( cbOpcodeBytes
14229 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14230 {
14231 iemInitDecoder(pVCpu, false);
14232#ifdef IEM_WITH_CODE_TLB
14233 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14234 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14235 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14236 pVCpu->iem.s.offCurInstrStart = 0;
14237 pVCpu->iem.s.offInstrNextByte = 0;
14238#else
14239 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14240 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14241#endif
14242 rcStrict = VINF_SUCCESS;
14243 }
14244 else
14245 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14246 if (rcStrict == VINF_SUCCESS)
14247 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14248 else if (pVCpu->iem.s.cActiveMappings > 0)
14249 iemMemRollback(pVCpu);
14250
14251#ifdef IN_RC
14252 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14253#endif
14254 return rcStrict;
14255}
14256
14257
14258VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14259{
14260 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14261
14262 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14263 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14264 if (rcStrict == VINF_SUCCESS)
14265 {
14266 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14267 if (pcbWritten)
14268 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14269 }
14270 else if (pVCpu->iem.s.cActiveMappings > 0)
14271 iemMemRollback(pVCpu);
14272
14273#ifdef IN_RC
14274 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14275#endif
14276 return rcStrict;
14277}
14278
14279
14280VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14281 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14282{
14283 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14284
14285 VBOXSTRICTRC rcStrict;
14286 if ( cbOpcodeBytes
14287 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14288 {
14289 iemInitDecoder(pVCpu, true);
14290#ifdef IEM_WITH_CODE_TLB
14291 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14292 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14293 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14294 pVCpu->iem.s.offCurInstrStart = 0;
14295 pVCpu->iem.s.offInstrNextByte = 0;
14296#else
14297 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14298 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14299#endif
14300 rcStrict = VINF_SUCCESS;
14301 }
14302 else
14303 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14304 if (rcStrict == VINF_SUCCESS)
14305 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14306 else if (pVCpu->iem.s.cActiveMappings > 0)
14307 iemMemRollback(pVCpu);
14308
14309#ifdef IN_RC
14310 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14311#endif
14312 return rcStrict;
14313}
14314
14315
14316/**
14317 * For debugging DISGetParamSize; may come in handy.
14318 *
14319 * @returns Strict VBox status code.
14320 * @param pVCpu The cross context virtual CPU structure of the
14321 * calling EMT.
14322 * @param pCtxCore The context core structure.
14323 * @param OpcodeBytesPC The PC of the opcode bytes.
14324 * @param pvOpcodeBytes Prefetched opcode bytes.
14325 * @param cbOpcodeBytes Number of prefetched bytes.
14326 * @param pcbWritten Where to return the number of bytes written.
14327 * Optional.
14328 */
14329VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14330 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14331 uint32_t *pcbWritten)
14332{
14333 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14334
14335 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14336 VBOXSTRICTRC rcStrict;
14337 if ( cbOpcodeBytes
14338 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14339 {
14340 iemInitDecoder(pVCpu, true);
14341#ifdef IEM_WITH_CODE_TLB
14342 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14343 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14344 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14345 pVCpu->iem.s.offCurInstrStart = 0;
14346 pVCpu->iem.s.offInstrNextByte = 0;
14347#else
14348 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14349 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14350#endif
14351 rcStrict = VINF_SUCCESS;
14352 }
14353 else
14354 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14355 if (rcStrict == VINF_SUCCESS)
14356 {
14357 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14358 if (pcbWritten)
14359 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14360 }
14361 else if (pVCpu->iem.s.cActiveMappings > 0)
14362 iemMemRollback(pVCpu);
14363
14364#ifdef IN_RC
14365 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14366#endif
14367 return rcStrict;
14368}
14369
14370
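/*
 * Editorial note on the poll-rate parameter of IEMExecLots below, inferred
 * from the power-of-two assertion and the TMTimerPollBool check rather than
 * from separate documentation: cPollRate must be a power of two minus one and
 * timers are polled roughly every cPollRate + 1 instructions, e.g.:
 *
 *   IEMExecLots(pVCpu, 4096, 511, NULL); // poll timers about every 512 instructions
 */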
14371VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14372{
14373 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14374 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14375
14376 /*
14377 * See if there is an interrupt pending in TRPM, inject it if we can.
14378 */
14379 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14380#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14381 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14382 if (fIntrEnabled)
14383 {
14384 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14385 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14386 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14387 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14388 else
14389 {
14390 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14391 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14392 }
14393 }
14394#else
14395 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14396#endif
14397 if ( fIntrEnabled
14398 && TRPMHasTrap(pVCpu)
14399 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14400 {
14401 uint8_t u8TrapNo;
14402 TRPMEVENT enmType;
14403 RTGCUINT uErrCode;
14404 RTGCPTR uCr2;
14405 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14406 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14407 TRPMResetTrap(pVCpu);
14408#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14409 /* Injecting an event may cause a VM-exit. */
14410 if ( rcStrict != VINF_SUCCESS
14411 && rcStrict != VINF_IEM_RAISED_XCPT)
14412 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14413#else
14414 NOREF(rcStrict);
14415#endif
14416 }
14417
14418 /*
14419 * Initial decoder init w/ prefetch, then setup setjmp.
14420 */
14421 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14422 if (rcStrict == VINF_SUCCESS)
14423 {
14424#ifdef IEM_WITH_SETJMP
14425 jmp_buf JmpBuf;
14426 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14427 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14428 pVCpu->iem.s.cActiveMappings = 0;
14429 if ((rcStrict = setjmp(JmpBuf)) == 0)
14430#endif
14431 {
14432 /*
14433 * The run loop. We limit ourselves to the caller-specified maximum number of instructions.
14434 */
14435 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14436 PVM pVM = pVCpu->CTX_SUFF(pVM);
14437 for (;;)
14438 {
14439 /*
14440 * Log the state.
14441 */
14442#ifdef LOG_ENABLED
14443 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14444#endif
14445
14446 /*
14447 * Do the decoding and emulation.
14448 */
14449 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14450 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14451 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14452 {
14453 Assert(pVCpu->iem.s.cActiveMappings == 0);
14454 pVCpu->iem.s.cInstructions++;
14455 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14456 {
14457 uint64_t fCpu = pVCpu->fLocalForcedActions
14458 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14459 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14460 | VMCPU_FF_TLB_FLUSH
14461#ifdef VBOX_WITH_RAW_MODE
14462 | VMCPU_FF_TRPM_SYNC_IDT
14463 | VMCPU_FF_SELM_SYNC_TSS
14464 | VMCPU_FF_SELM_SYNC_GDT
14465 | VMCPU_FF_SELM_SYNC_LDT
14466#endif
14467 | VMCPU_FF_INHIBIT_INTERRUPTS
14468 | VMCPU_FF_BLOCK_NMIS
14469 | VMCPU_FF_UNHALT ));
14470
14471 if (RT_LIKELY( ( !fCpu
14472 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14473 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14474 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14475 {
14476 if (cMaxInstructionsGccStupidity-- > 0)
14477 {
14478 /* Poll timers every now and then according to the caller's specs. */
14479 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14480 || !TMTimerPollBool(pVM, pVCpu))
14481 {
14482 Assert(pVCpu->iem.s.cActiveMappings == 0);
14483 iemReInitDecoder(pVCpu);
14484 continue;
14485 }
14486 }
14487 }
14488 }
14489 Assert(pVCpu->iem.s.cActiveMappings == 0);
14490 }
14491 else if (pVCpu->iem.s.cActiveMappings > 0)
14492 iemMemRollback(pVCpu);
14493 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14494 break;
14495 }
14496 }
14497#ifdef IEM_WITH_SETJMP
14498 else
14499 {
14500 if (pVCpu->iem.s.cActiveMappings > 0)
14501 iemMemRollback(pVCpu);
14502# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14503 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14504# endif
14505 pVCpu->iem.s.cLongJumps++;
14506 }
14507 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14508#endif
14509
14510 /*
14511 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14512 */
14513 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14514 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14515 }
14516 else
14517 {
14518 if (pVCpu->iem.s.cActiveMappings > 0)
14519 iemMemRollback(pVCpu);
14520
14521#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14522 /*
14523 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14524 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14525 */
14526 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14527#endif
14528 }
14529
14530 /*
14531 * Maybe re-enter raw-mode and log.
14532 */
14533#ifdef IN_RC
14534 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14535#endif
14536 if (rcStrict != VINF_SUCCESS)
14537 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14538 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14539 if (pcInstructions)
14540 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14541 return rcStrict;
14542}
14543
14544
14545/**
14546 * Interface used by EMExecuteExec, does exit statistics and limits.
14547 *
14548 * @returns Strict VBox status code.
14549 * @param pVCpu The cross context virtual CPU structure.
14550 * @param fWillExit To be defined.
14551 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14552 * @param cMaxInstructions Maximum number of instructions to execute.
14553 * @param cMaxInstructionsWithoutExits
14554 * The max number of instructions without exits.
14555 * @param pStats Where to return statistics.
14556 */
14557VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14558 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14559{
14560 NOREF(fWillExit); /** @todo define flexible exit crits */
14561
14562 /*
14563 * Initialize return stats.
14564 */
14565 pStats->cInstructions = 0;
14566 pStats->cExits = 0;
14567 pStats->cMaxExitDistance = 0;
14568 pStats->cReserved = 0;
14569
14570 /*
14571 * Initial decoder init w/ prefetch, then setup setjmp.
14572 */
14573 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14574 if (rcStrict == VINF_SUCCESS)
14575 {
14576#ifdef IEM_WITH_SETJMP
14577 jmp_buf JmpBuf;
14578 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14579 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14580 pVCpu->iem.s.cActiveMappings = 0;
14581 if ((rcStrict = setjmp(JmpBuf)) == 0)
14582#endif
14583 {
14584#ifdef IN_RING0
14585 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14586#endif
14587 uint32_t cInstructionSinceLastExit = 0;
14588
14589 /*
14590 * The run loop. We limit ourselves to the caller-specified instruction limits.
14591 */
14592 PVM pVM = pVCpu->CTX_SUFF(pVM);
14593 for (;;)
14594 {
14595 /*
14596 * Log the state.
14597 */
14598#ifdef LOG_ENABLED
14599 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14600#endif
14601
14602 /*
14603 * Do the decoding and emulation.
14604 */
14605 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14606
14607 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14608 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14609
14610 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14611 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14612 {
14613 pStats->cExits += 1;
14614 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14615 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14616 cInstructionSinceLastExit = 0;
14617 }
14618
14619 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14620 {
14621 Assert(pVCpu->iem.s.cActiveMappings == 0);
14622 pVCpu->iem.s.cInstructions++;
14623 pStats->cInstructions++;
14624 cInstructionSinceLastExit++;
14625 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14626 {
14627 uint64_t fCpu = pVCpu->fLocalForcedActions
14628 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14629 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14630 | VMCPU_FF_TLB_FLUSH
14631#ifdef VBOX_WITH_RAW_MODE
14632 | VMCPU_FF_TRPM_SYNC_IDT
14633 | VMCPU_FF_SELM_SYNC_TSS
14634 | VMCPU_FF_SELM_SYNC_GDT
14635 | VMCPU_FF_SELM_SYNC_LDT
14636#endif
14637 | VMCPU_FF_INHIBIT_INTERRUPTS
14638 | VMCPU_FF_BLOCK_NMIS
14639 | VMCPU_FF_UNHALT ));
14640
14641 if (RT_LIKELY( ( ( !fCpu
14642 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14643 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14644 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14645 || pStats->cInstructions < cMinInstructions))
14646 {
14647 if (pStats->cInstructions < cMaxInstructions)
14648 {
14649 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14650 {
14651#ifdef IN_RING0
14652 if ( !fCheckPreemptionPending
14653 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14654#endif
14655 {
14656 Assert(pVCpu->iem.s.cActiveMappings == 0);
14657 iemReInitDecoder(pVCpu);
14658 continue;
14659 }
14660#ifdef IN_RING0
14661 rcStrict = VINF_EM_RAW_INTERRUPT;
14662 break;
14663#endif
14664 }
14665 }
14666 }
14667 Assert(!(fCpu & VMCPU_FF_IEM));
14668 }
14669 Assert(pVCpu->iem.s.cActiveMappings == 0);
14670 }
14671 else if (pVCpu->iem.s.cActiveMappings > 0)
14672 iemMemRollback(pVCpu);
14673 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14674 break;
14675 }
14676 }
14677#ifdef IEM_WITH_SETJMP
14678 else
14679 {
14680 if (pVCpu->iem.s.cActiveMappings > 0)
14681 iemMemRollback(pVCpu);
14682 pVCpu->iem.s.cLongJumps++;
14683 }
14684 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14685#endif
14686
14687 /*
14688 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14689 */
14690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14691 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14692 }
14693 else
14694 {
14695 if (pVCpu->iem.s.cActiveMappings > 0)
14696 iemMemRollback(pVCpu);
14697
14698#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14699 /*
14700         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14701         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14702 */
14703 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14704#endif
14705 }
14706
14707 /*
14708 * Maybe re-enter raw-mode and log.
14709 */
14710#ifdef IN_RC
14711 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14712#endif
14713 if (rcStrict != VINF_SUCCESS)
14714 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14715 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14716 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14717 return rcStrict;
14718}
14719
14720
14721/**
14722 * Injects a trap, fault, abort, software interrupt or external interrupt.
14723 *
14724 * The parameter list matches TRPMQueryTrapAll pretty closely.
14725 *
14726 * @returns Strict VBox status code.
14727 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14728 * @param u8TrapNo The trap number.
14729 * @param   enmType     What type it is (trap/fault/abort), software
14730 * interrupt or hardware interrupt.
14731 * @param uErrCode The error code if applicable.
14732 * @param uCr2 The CR2 value if applicable.
14733 * @param cbInstr The instruction length (only relevant for
14734 * software interrupts).
14735 */
14736VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14737 uint8_t cbInstr)
14738{
14739 iemInitDecoder(pVCpu, false);
14740#ifdef DBGFTRACE_ENABLED
14741 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14742 u8TrapNo, enmType, uErrCode, uCr2);
14743#endif
14744
14745 uint32_t fFlags;
14746 switch (enmType)
14747 {
14748 case TRPM_HARDWARE_INT:
14749 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14750 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14751 uErrCode = uCr2 = 0;
14752 break;
14753
14754 case TRPM_SOFTWARE_INT:
14755 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14756 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14757 uErrCode = uCr2 = 0;
14758 break;
14759
14760 case TRPM_TRAP:
14761 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14762 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14763 if (u8TrapNo == X86_XCPT_PF)
14764 fFlags |= IEM_XCPT_FLAGS_CR2;
14765 switch (u8TrapNo)
14766 {
14767 case X86_XCPT_DF:
14768 case X86_XCPT_TS:
14769 case X86_XCPT_NP:
14770 case X86_XCPT_SS:
14771 case X86_XCPT_PF:
14772 case X86_XCPT_AC:
14773 fFlags |= IEM_XCPT_FLAGS_ERR;
14774 break;
14775 }
14776 break;
14777
14778 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14779 }
14780
14781 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14782
14783 if (pVCpu->iem.s.cActiveMappings > 0)
14784 iemMemRollback(pVCpu);
14785
14786 return rcStrict;
14787}
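
/* Usage sketch (hypothetical caller, not part of this file): injecting a page
 * fault on the calling EMT.  X86_XCPT_PF and TRPM_TRAP are the constants used
 * elsewhere in this file; uErrCd and GCPtrFault stand in for caller supplied
 * values, and cbInstr is irrelevant for CPU exceptions so zero is passed.
 *
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                           uErrCd, GCPtrFault, 0);
 */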
14788
14789
14790/**
14791 * Injects the active TRPM event.
14792 *
14793 * @returns Strict VBox status code.
14794 * @param pVCpu The cross context virtual CPU structure.
14795 */
14796VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14797{
14798#ifndef IEM_IMPLEMENTS_TASKSWITCH
14799 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14800#else
14801 uint8_t u8TrapNo;
14802 TRPMEVENT enmType;
14803 RTGCUINT uErrCode;
14804 RTGCUINTPTR uCr2;
14805 uint8_t cbInstr;
14806 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14807 if (RT_FAILURE(rc))
14808 return rc;
14809
14810 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14811#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14812 if (rcStrict == VINF_SVM_VMEXIT)
14813 rcStrict = VINF_SUCCESS;
14814#endif
14815#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14816 if (rcStrict == VINF_VMX_VMEXIT)
14817 rcStrict = VINF_SUCCESS;
14818#endif
14819 /** @todo Are there any other codes that imply the event was successfully
14820 * delivered to the guest? See @bugref{6607}. */
14821 if ( rcStrict == VINF_SUCCESS
14822 || rcStrict == VINF_IEM_RAISED_XCPT)
14823 TRPMResetTrap(pVCpu);
14824
14825 return rcStrict;
14826#endif
14827}
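
/* Usage sketch (hypothetical caller): handing a pending TRPM event to IEM for
 * delivery, e.g. when delivery requires a task switch that the regular
 * injection path cannot perform.  TRPMHasTrap() is assumed here to be the
 * TRPM query for a pending event on the calling EMT.
 *
 *     if (TRPMHasTrap(pVCpu))
 *         rcStrict = IEMInjectTrpmEvent(pVCpu);
 */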
14828
14829
14830VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14831{
14832 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14833 return VERR_NOT_IMPLEMENTED;
14834}
14835
14836
14837VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14838{
14839 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14840 return VERR_NOT_IMPLEMENTED;
14841}
14842
14843
14844#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14845/**
14846 * Executes an IRET instruction with the default operand size.
14847 *
14848 * This is for PATM.
14849 *
14850 * @returns VBox status code.
14851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14852 * @param pCtxCore The register frame.
14853 */
14854VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14855{
14856 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14857
14858 iemCtxCoreToCtx(pCtx, pCtxCore);
14859 iemInitDecoder(pVCpu);
14860 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14861 if (rcStrict == VINF_SUCCESS)
14862 iemCtxToCtxCore(pCtxCore, pCtx);
14863 else
14864 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14865 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14866 return rcStrict;
14867}
14868#endif
14869
14870
14871/**
14872 * Macro used by the IEMExec* methods to check the given instruction length.
14873 *
14874 * Will return on failure!
14875 *
14876 * @param a_cbInstr The given instruction length.
14877 * @param a_cbMin The minimum length.
14878 */
14879#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14880 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14881 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
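
/* Note on the single comparison in the macro above: because an a_cbInstr
 * smaller than a_cbMin wraps around to a huge unsigned value, one unsigned
 * compare covers both bounds, i.e. it is equivalent to requiring
 * a_cbMin <= a_cbInstr <= 15 (the maximum x86 instruction length).
 * Example with a_cbMin=2: the right-hand side is 13; a_cbInstr=1 yields
 * UINT_MAX and fails, while a_cbInstr=2..15 yields 0..13 and passes. */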
14882
14883
14884/**
14885 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14886 *
14887 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14888 *
14889 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14891 * @param rcStrict The status code to fiddle.
14892 */
14893DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14894{
14895 iemUninitExec(pVCpu);
14896#ifdef IN_RC
14897 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14898#else
14899 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14900#endif
14901}
14902
14903
14904/**
14905 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14906 *
14907 * This API ASSUMES that the caller has already verified that the guest code is
14908 * allowed to access the I/O port. (The I/O port is in the DX register in the
14909 * guest state.)
14910 *
14911 * @returns Strict VBox status code.
14912 * @param pVCpu The cross context virtual CPU structure.
14913 * @param cbValue The size of the I/O port access (1, 2, or 4).
14914 * @param enmAddrMode The addressing mode.
14915 * @param fRepPrefix Indicates whether a repeat prefix is used
14916 * (doesn't matter which for this instruction).
14917 * @param cbInstr The instruction length in bytes.
14918 * @param   iEffSeg     The effective segment register number.
14919 * @param fIoChecked Whether the access to the I/O port has been
14920 * checked or not. It's typically checked in the
14921 * HM scenario.
14922 */
14923VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14924 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14925{
14926 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14927 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14928
14929 /*
14930 * State init.
14931 */
14932 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14933
14934 /*
14935 * Switch orgy for getting to the right handler.
14936 */
14937 VBOXSTRICTRC rcStrict;
14938 if (fRepPrefix)
14939 {
14940 switch (enmAddrMode)
14941 {
14942 case IEMMODE_16BIT:
14943 switch (cbValue)
14944 {
14945 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14946 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14947 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14948 default:
14949 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14950 }
14951 break;
14952
14953 case IEMMODE_32BIT:
14954 switch (cbValue)
14955 {
14956 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14957 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14958 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14959 default:
14960 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14961 }
14962 break;
14963
14964 case IEMMODE_64BIT:
14965 switch (cbValue)
14966 {
14967 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14968 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14969 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14970 default:
14971 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14972 }
14973 break;
14974
14975 default:
14976 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14977 }
14978 }
14979 else
14980 {
14981 switch (enmAddrMode)
14982 {
14983 case IEMMODE_16BIT:
14984 switch (cbValue)
14985 {
14986 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14987 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14988 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14989 default:
14990 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14991 }
14992 break;
14993
14994 case IEMMODE_32BIT:
14995 switch (cbValue)
14996 {
14997 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14998 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14999 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15000 default:
15001 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15002 }
15003 break;
15004
15005 case IEMMODE_64BIT:
15006 switch (cbValue)
15007 {
15008 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15009 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15010 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15011 default:
15012 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15013 }
15014 break;
15015
15016 default:
15017 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15018 }
15019 }
15020
15021 if (pVCpu->iem.s.cActiveMappings)
15022 iemMemRollback(pVCpu);
15023
15024 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15025}
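
/* Usage sketch (hypothetical HM caller): emulating a REP OUTSB with 32-bit
 * addressing and the default DS segment after the exit handler has already
 * validated access to the I/O port, hence fIoChecked=true.  The arguments
 * follow the prototype above: cbValue=1, enmAddrMode=IEMMODE_32BIT,
 * fRepPrefix=true, then cbInstr, iEffSeg and fIoChecked.
 *
 *     rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true,
 *                                     cbInstr, X86_SREG_DS, true);
 */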
15026
15027
15028/**
15029 * Interface for HM and EM for executing string I/O IN (read) instructions.
15030 *
15031 * This API ASSUMES that the caller has already verified that the guest code is
15032 * allowed to access the I/O port. (The I/O port is in the DX register in the
15033 * guest state.)
15034 *
15035 * @returns Strict VBox status code.
15036 * @param pVCpu The cross context virtual CPU structure.
15037 * @param cbValue The size of the I/O port access (1, 2, or 4).
15038 * @param enmAddrMode The addressing mode.
15039 * @param fRepPrefix Indicates whether a repeat prefix is used
15040 * (doesn't matter which for this instruction).
15041 * @param cbInstr The instruction length in bytes.
15042 * @param fIoChecked Whether the access to the I/O port has been
15043 * checked or not. It's typically checked in the
15044 * HM scenario.
15045 */
15046VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15047 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15048{
15049 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15050
15051 /*
15052 * State init.
15053 */
15054 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15055
15056 /*
15057 * Switch orgy for getting to the right handler.
15058 */
15059 VBOXSTRICTRC rcStrict;
15060 if (fRepPrefix)
15061 {
15062 switch (enmAddrMode)
15063 {
15064 case IEMMODE_16BIT:
15065 switch (cbValue)
15066 {
15067 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15068 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15069 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15070 default:
15071 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15072 }
15073 break;
15074
15075 case IEMMODE_32BIT:
15076 switch (cbValue)
15077 {
15078 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15079 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15080 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15081 default:
15082 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15083 }
15084 break;
15085
15086 case IEMMODE_64BIT:
15087 switch (cbValue)
15088 {
15089 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15090 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15091 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15092 default:
15093 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15094 }
15095 break;
15096
15097 default:
15098 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15099 }
15100 }
15101 else
15102 {
15103 switch (enmAddrMode)
15104 {
15105 case IEMMODE_16BIT:
15106 switch (cbValue)
15107 {
15108 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15109 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15110 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15111 default:
15112 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15113 }
15114 break;
15115
15116 case IEMMODE_32BIT:
15117 switch (cbValue)
15118 {
15119 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15120 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15121 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15122 default:
15123 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15124 }
15125 break;
15126
15127 case IEMMODE_64BIT:
15128 switch (cbValue)
15129 {
15130 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15131 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15132 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15133 default:
15134 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15135 }
15136 break;
15137
15138 default:
15139 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15140 }
15141 }
15142
15143 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15144 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15145}
15146
15147
15148/**
15149 * Interface for rawmode to execute an OUT instruction.
15150 *
15151 * @returns Strict VBox status code.
15152 * @param pVCpu The cross context virtual CPU structure.
15153 * @param cbInstr The instruction length in bytes.
15154 * @param   u16Port     The port to write to.
15155 * @param fImm Whether the port is specified using an immediate operand or
15156 * using the implicit DX register.
15157 * @param cbReg The register size.
15158 *
15159 * @remarks In ring-0 not all of the state needs to be synced in.
15160 */
15161VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15162{
15163 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15164 Assert(cbReg <= 4 && cbReg != 3);
15165
15166 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15167 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15168 Assert(!pVCpu->iem.s.cActiveMappings);
15169 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15170}
15171
15172
15173/**
15174 * Interface for rawmode to execute an IN instruction.
15175 *
15176 * @returns Strict VBox status code.
15177 * @param pVCpu The cross context virtual CPU structure.
15178 * @param cbInstr The instruction length in bytes.
15179 * @param u16Port The port to read.
15180 * @param fImm Whether the port is specified using an immediate operand or
15181 *                      using the implicit DX register.
15182 * @param cbReg The register size.
15183 */
15184VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15185{
15186 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15187 Assert(cbReg <= 4 && cbReg != 3);
15188
15189 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15190 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15191 Assert(!pVCpu->iem.s.cActiveMappings);
15192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15193}
15194
15195
15196/**
15197 * Interface for HM and EM to write to a CRx register.
15198 *
15199 * @returns Strict VBox status code.
15200 * @param pVCpu The cross context virtual CPU structure.
15201 * @param cbInstr The instruction length in bytes.
15202 * @param iCrReg The control register number (destination).
15203 * @param iGReg The general purpose register number (source).
15204 *
15205 * @remarks In ring-0 not all of the state needs to be synced in.
15206 */
15207VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15208{
15209 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15210 Assert(iCrReg < 16);
15211 Assert(iGReg < 16);
15212
15213 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15214 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15215 Assert(!pVCpu->iem.s.cActiveMappings);
15216 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15217}
15218
15219
15220/**
15221 * Interface for HM and EM to read from a CRx register.
15222 *
15223 * @returns Strict VBox status code.
15224 * @param pVCpu The cross context virtual CPU structure.
15225 * @param cbInstr The instruction length in bytes.
15226 * @param iGReg The general purpose register number (destination).
15227 * @param iCrReg The control register number (source).
15228 *
15229 * @remarks In ring-0 not all of the state needs to be synced in.
15230 */
15231VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15232{
15233 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15234 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15235 | CPUMCTX_EXTRN_APIC_TPR);
15236 Assert(iCrReg < 16);
15237 Assert(iGReg < 16);
15238
15239 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15240 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15241 Assert(!pVCpu->iem.s.cActiveMappings);
15242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15243}
15244
15245
15246/**
15247 * Interface for HM and EM to clear the CR0[TS] bit.
15248 *
15249 * @returns Strict VBox status code.
15250 * @param pVCpu The cross context virtual CPU structure.
15251 * @param cbInstr The instruction length in bytes.
15252 *
15253 * @remarks In ring-0 not all of the state needs to be synced in.
15254 */
15255VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15256{
15257 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15258
15259 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15260 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15261 Assert(!pVCpu->iem.s.cActiveMappings);
15262 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15263}
15264
15265
15266/**
15267 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15268 *
15269 * @returns Strict VBox status code.
15270 * @param pVCpu The cross context virtual CPU structure.
15271 * @param cbInstr The instruction length in bytes.
15272 * @param uValue The value to load into CR0.
15273 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15274 * memory operand. Otherwise pass NIL_RTGCPTR.
15275 *
15276 * @remarks In ring-0 not all of the state needs to be synced in.
15277 */
15278VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15279{
15280 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15281
15282 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15283 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15284 Assert(!pVCpu->iem.s.cActiveMappings);
15285 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15286}
15287
15288
15289/**
15290 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15291 *
15292 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15293 *
15294 * @returns Strict VBox status code.
15295 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15296 * @param cbInstr The instruction length in bytes.
15297 * @remarks In ring-0 not all of the state needs to be synced in.
15298 * @thread EMT(pVCpu)
15299 */
15300VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15301{
15302 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15303
15304 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15305 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15306 Assert(!pVCpu->iem.s.cActiveMappings);
15307 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15308}
15309
15310
15311/**
15312 * Interface for HM and EM to emulate the WBINVD instruction.
15313 *
15314 * @returns Strict VBox status code.
15315 * @param pVCpu The cross context virtual CPU structure.
15316 * @param cbInstr The instruction length in bytes.
15317 *
15318 * @remarks In ring-0 not all of the state needs to be synced in.
15319 */
15320VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15321{
15322 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15323
15324 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15325 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15326 Assert(!pVCpu->iem.s.cActiveMappings);
15327 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15328}
15329
15330
15331/**
15332 * Interface for HM and EM to emulate the INVD instruction.
15333 *
15334 * @returns Strict VBox status code.
15335 * @param pVCpu The cross context virtual CPU structure.
15336 * @param cbInstr The instruction length in bytes.
15337 *
15338 * @remarks In ring-0 not all of the state needs to be synced in.
15339 */
15340VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15341{
15342 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15343
15344 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15345 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15346 Assert(!pVCpu->iem.s.cActiveMappings);
15347 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15348}
15349
15350
15351/**
15352 * Interface for HM and EM to emulate the INVLPG instruction.
15353 *
15354 * @returns Strict VBox status code.
15355 * @retval VINF_PGM_SYNC_CR3
15356 *
15357 * @param pVCpu The cross context virtual CPU structure.
15358 * @param cbInstr The instruction length in bytes.
15359 * @param GCPtrPage The effective address of the page to invalidate.
15360 *
15361 * @remarks In ring-0 not all of the state needs to be synced in.
15362 */
15363VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15364{
15365 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15366
15367 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15368 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15369 Assert(!pVCpu->iem.s.cActiveMappings);
15370 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15371}
15372
15373
15374/**
15375 * Interface for HM and EM to emulate the CPUID instruction.
15376 *
15377 * @returns Strict VBox status code.
15378 *
15379 * @param pVCpu The cross context virtual CPU structure.
15380 * @param cbInstr The instruction length in bytes.
15381 *
15382 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15383 */
15384VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15385{
15386 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15387 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15388
15389 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15390 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15391 Assert(!pVCpu->iem.s.cActiveMappings);
15392 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15393}
15394
15395
15396/**
15397 * Interface for HM and EM to emulate the RDPMC instruction.
15398 *
15399 * @returns Strict VBox status code.
15400 *
15401 * @param pVCpu The cross context virtual CPU structure.
15402 * @param cbInstr The instruction length in bytes.
15403 *
15404 * @remarks Not all of the state needs to be synced in.
15405 */
15406VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15407{
15408 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15409 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15410
15411 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15412 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15413 Assert(!pVCpu->iem.s.cActiveMappings);
15414 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15415}
15416
15417
15418/**
15419 * Interface for HM and EM to emulate the RDTSC instruction.
15420 *
15421 * @returns Strict VBox status code.
15422 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15423 *
15424 * @param pVCpu The cross context virtual CPU structure.
15425 * @param cbInstr The instruction length in bytes.
15426 *
15427 * @remarks Not all of the state needs to be synced in.
15428 */
15429VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15430{
15431 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15432 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15433
15434 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15436 Assert(!pVCpu->iem.s.cActiveMappings);
15437 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15438}
15439
15440
15441/**
15442 * Interface for HM and EM to emulate the RDTSCP instruction.
15443 *
15444 * @returns Strict VBox status code.
15445 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15446 *
15447 * @param pVCpu The cross context virtual CPU structure.
15448 * @param cbInstr The instruction length in bytes.
15449 *
15450 * @remarks Not all of the state needs to be synced in. Recommended
15451 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15452 */
15453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15454{
15455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15456 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15457
15458 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15459 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15460 Assert(!pVCpu->iem.s.cActiveMappings);
15461 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15462}
15463
15464
15465/**
15466 * Interface for HM and EM to emulate the RDMSR instruction.
15467 *
15468 * @returns Strict VBox status code.
15469 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15470 *
15471 * @param pVCpu The cross context virtual CPU structure.
15472 * @param cbInstr The instruction length in bytes.
15473 *
15474 * @remarks Not all of the state needs to be synced in. Requires RCX and
15475 * (currently) all MSRs.
15476 */
15477VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15478{
15479 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15480 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15481
15482 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15483 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15484 Assert(!pVCpu->iem.s.cActiveMappings);
15485 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15486}
15487
15488
15489/**
15490 * Interface for HM and EM to emulate the WRMSR instruction.
15491 *
15492 * @returns Strict VBox status code.
15493 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15494 *
15495 * @param pVCpu The cross context virtual CPU structure.
15496 * @param cbInstr The instruction length in bytes.
15497 *
15498 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15499 * and (currently) all MSRs.
15500 */
15501VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15502{
15503 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15504 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15505 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15506
15507 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15508 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15509 Assert(!pVCpu->iem.s.cActiveMappings);
15510 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15511}
15512
15513
15514/**
15515 * Interface for HM and EM to emulate the MONITOR instruction.
15516 *
15517 * @returns Strict VBox status code.
15518 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15519 *
15520 * @param pVCpu The cross context virtual CPU structure.
15521 * @param cbInstr The instruction length in bytes.
15522 *
15523 * @remarks Not all of the state needs to be synced in.
15524 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15525 * are used.
15526 */
15527VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15528{
15529 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15530 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15531
15532 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15533 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15534 Assert(!pVCpu->iem.s.cActiveMappings);
15535 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15536}
15537
15538
15539/**
15540 * Interface for HM and EM to emulate the MWAIT instruction.
15541 *
15542 * @returns Strict VBox status code.
15543 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15544 *
15545 * @param pVCpu The cross context virtual CPU structure.
15546 * @param cbInstr The instruction length in bytes.
15547 *
15548 * @remarks Not all of the state needs to be synced in.
15549 */
15550VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15551{
15552 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15553
15554 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15555 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15556 Assert(!pVCpu->iem.s.cActiveMappings);
15557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15558}
15559
15560
15561/**
15562 * Interface for HM and EM to emulate the HLT instruction.
15563 *
15564 * @returns Strict VBox status code.
15565 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15566 *
15567 * @param pVCpu The cross context virtual CPU structure.
15568 * @param cbInstr The instruction length in bytes.
15569 *
15570 * @remarks Not all of the state needs to be synced in.
15571 */
15572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15573{
15574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15575
15576 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15577 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15578 Assert(!pVCpu->iem.s.cActiveMappings);
15579 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15580}
15581
15582
15583/**
15584 * Checks if IEM is in the process of delivering an event (interrupt or
15585 * exception).
15586 *
15587 * @returns true if we're in the process of raising an interrupt or exception,
15588 * false otherwise.
15589 * @param pVCpu The cross context virtual CPU structure.
15590 * @param puVector Where to store the vector associated with the
15591 * currently delivered event, optional.
15592 * @param   pfFlags         Where to store the event delivery flags (see
15593 * IEM_XCPT_FLAGS_XXX), optional.
15594 * @param puErr Where to store the error code associated with the
15595 * event, optional.
15596 * @param puCr2 Where to store the CR2 associated with the event,
15597 * optional.
15598 * @remarks The caller should check the flags to determine if the error code and
15599 * CR2 are valid for the event.
15600 */
15601VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15602{
15603 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15604 if (fRaisingXcpt)
15605 {
15606 if (puVector)
15607 *puVector = pVCpu->iem.s.uCurXcpt;
15608 if (pfFlags)
15609 *pfFlags = pVCpu->iem.s.fCurXcpt;
15610 if (puErr)
15611 *puErr = pVCpu->iem.s.uCurXcptErr;
15612 if (puCr2)
15613 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15614 }
15615 return fRaisingXcpt;
15616}
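
/* Usage sketch (hypothetical caller): checking whether IEM is busy delivering
 * an event before queueing another one, e.g. to spot a potential double fault
 * situation.  Only the vector is wanted here, so the remaining optional out
 * parameters are passed as NULL.
 *
 *     uint8_t uVector;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, NULL, NULL, NULL))
 *         Log(("Already delivering vector %#x\n", uVector));
 */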
15617
15618#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15619
15620/**
15621 * Interface for HM and EM to emulate the CLGI instruction.
15622 *
15623 * @returns Strict VBox status code.
15624 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15625 * @param cbInstr The instruction length in bytes.
15626 * @thread EMT(pVCpu)
15627 */
15628VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15629{
15630 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15631
15632 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15633 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15634 Assert(!pVCpu->iem.s.cActiveMappings);
15635 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15636}
15637
15638
15639/**
15640 * Interface for HM and EM to emulate the STGI instruction.
15641 *
15642 * @returns Strict VBox status code.
15643 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15644 * @param cbInstr The instruction length in bytes.
15645 * @thread EMT(pVCpu)
15646 */
15647VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15648{
15649 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15650
15651 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15652 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15653 Assert(!pVCpu->iem.s.cActiveMappings);
15654 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15655}
15656
15657
15658/**
15659 * Interface for HM and EM to emulate the VMLOAD instruction.
15660 *
15661 * @returns Strict VBox status code.
15662 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15663 * @param cbInstr The instruction length in bytes.
15664 * @thread EMT(pVCpu)
15665 */
15666VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15667{
15668 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15669
15670 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15671 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15672 Assert(!pVCpu->iem.s.cActiveMappings);
15673 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15674}
15675
15676
15677/**
15678 * Interface for HM and EM to emulate the VMSAVE instruction.
15679 *
15680 * @returns Strict VBox status code.
15681 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15682 * @param cbInstr The instruction length in bytes.
15683 * @thread EMT(pVCpu)
15684 */
15685VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15686{
15687 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15688
15689 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15690 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15691 Assert(!pVCpu->iem.s.cActiveMappings);
15692 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15693}
15694
15695
15696/**
15697 * Interface for HM and EM to emulate the INVLPGA instruction.
15698 *
15699 * @returns Strict VBox status code.
15700 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15701 * @param cbInstr The instruction length in bytes.
15702 * @thread EMT(pVCpu)
15703 */
15704VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15705{
15706 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15707
15708 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15709 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15710 Assert(!pVCpu->iem.s.cActiveMappings);
15711 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15712}
15713
15714
15715/**
15716 * Interface for HM and EM to emulate the VMRUN instruction.
15717 *
15718 * @returns Strict VBox status code.
15719 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15720 * @param cbInstr The instruction length in bytes.
15721 * @thread EMT(pVCpu)
15722 */
15723VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15724{
15725 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15726 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15727
15728 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15729 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15730 Assert(!pVCpu->iem.s.cActiveMappings);
15731 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15732}
15733
15734
15735/**
15736 * Interface for HM and EM to emulate \#VMEXIT.
15737 *
15738 * @returns Strict VBox status code.
15739 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15740 * @param uExitCode The exit code.
15741 * @param uExitInfo1 The exit info. 1 field.
15742 * @param uExitInfo2 The exit info. 2 field.
15743 * @thread EMT(pVCpu)
15744 */
15745VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15746{
15747 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15748 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15749 if (pVCpu->iem.s.cActiveMappings)
15750 iemMemRollback(pVCpu);
15751 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15752}
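
/* Usage sketch (hypothetical HM caller): reflecting an intercepted CPUID to
 * the nested-guest hypervisor.  SVM_EXIT_CPUID is the architectural exit
 * code; both exit info fields are assumed to be zero for this kind of exit.
 *
 *     rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0, 0);
 */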
15753
15754#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15755
15756#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15757
15758/**
15759 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15760 *
15761 * @returns Strict VBox status code.
15762 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15763 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15764 * the x2APIC device.
15765 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15766 *
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @param idMsr The MSR being read.
15769 * @param pu64Value Pointer to the value being written or where to store the
15770 * value being read.
15771 * @param fWrite Whether this is an MSR write or read access.
15772 * @thread EMT(pVCpu)
15773 */
15774VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15775{
15776 Assert(pu64Value);
15777
15778 VBOXSTRICTRC rcStrict;
15779 if (!fWrite)
15780 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15781 else
15782 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15783 if (pVCpu->iem.s.cActiveMappings)
15784 iemMemRollback(pVCpu);
15785 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15786
15787}
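
/* Usage sketch (hypothetical caller): virtualizing a guest RDMSR of an x2APIC
 * register, i.e. fWrite=false; idMsr stands in for the caller supplied MSR
 * index.  On VINF_VMX_MODIFIES_BEHAVIOR the value read is in u64Value; on
 * VINF_VMX_INTERCEPT_NOT_ACTIVE the caller forwards the access to the x2APIC
 * device instead.
 *
 *     uint64_t u64Value = 0;
 *     rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &u64Value, false);
 */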
15788
15789
15790/**
15791 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15792 *
15793 * @returns Strict VBox status code.
15794 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15795 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15796 *
15797 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15798 * @param offAccess The offset of the register being accessed (within the
15799 * APIC-access page).
15800 * @param cbAccess The size of the access in bytes.
15801 * @param pvData Pointer to the data being written or where to store the data
15802 * being read.
15803 * @param fWrite Whether this is a write or read access.
15804 * @thread EMT(pVCpu)
15805 */
15806VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15807 bool fWrite)
15808{
15809 Assert(pvData);
15810
15811 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15812 * accesses, so we only use read/write here. Maybe in the future the PGM
15813 * physical handler will be extended to include this information? */
15814 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15815 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15816 if (pVCpu->iem.s.cActiveMappings)
15817 iemMemRollback(pVCpu);
15818 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15819}
15820
15821
15822/**
15823 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15824 * VM-exit.
15825 *
15826 * @returns Strict VBox status code.
15827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15828 * @thread EMT(pVCpu)
15829 */
15830VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15831{
15832 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15833 if (pVCpu->iem.s.cActiveMappings)
15834 iemMemRollback(pVCpu);
15835 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15836}
15837
15838
15839/**
15840 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15841 *
15842 * @returns Strict VBox status code.
15843 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15844 * @thread EMT(pVCpu)
15845 */
15846VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15847{
15848 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15849 if (pVCpu->iem.s.cActiveMappings)
15850 iemMemRollback(pVCpu);
15851 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15852}
15853
15854
15855/**
15856 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15857 *
15858 * @returns Strict VBox status code.
15859 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15860 * @param uVector The external interrupt vector (pass 0 if the external
15861 * interrupt is still pending).
15862 * @param fIntPending Whether the external interrupt is pending or
15863 *                          acknowledged in the interrupt controller.
15864 * @thread EMT(pVCpu)
15865 */
15866VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15867{
15868 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15869 if (pVCpu->iem.s.cActiveMappings)
15870 iemMemRollback(pVCpu);
15871 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15872}
15873
15874
15875/**
15876 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15877 *
15878 * @returns Strict VBox status code.
15879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15880 * @param uVector The SIPI vector.
15881 * @thread EMT(pVCpu)
15882 */
15883VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15884{
15885 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15886 if (pVCpu->iem.s.cActiveMappings)
15887 iemMemRollback(pVCpu);
15888 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15889}
15890
15891
15892/**
15893 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15894 *
15895 * @returns Strict VBox status code.
15896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15897 * @thread EMT(pVCpu)
15898 */
15899VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15900{
15901 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15902 if (pVCpu->iem.s.cActiveMappings)
15903 iemMemRollback(pVCpu);
15904 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15905}
15906
15907
15908/**
15909 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15910 *
15911 * @returns Strict VBox status code.
15912 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15913 * @thread EMT(pVCpu)
15914 */
15915VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15916{
15917 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15918 if (pVCpu->iem.s.cActiveMappings)
15919 iemMemRollback(pVCpu);
15920 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15921}
15922
15923
15924/**
15925 * Interface for HM and EM to emulate VM-exits for NMI-windows.
15926 *
15927 * @returns Strict VBox status code.
15928 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15929 * @thread EMT(pVCpu)
15930 */
15931VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu)
15932{
15933 VBOXSTRICTRC rcStrict = iemVmxVmexitNmiWindow(pVCpu);
15934 if (pVCpu->iem.s.cActiveMappings)
15935 iemMemRollback(pVCpu);
15936 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15937}
15938
15939
15940/**
15941 * Interface for HM and EM to emulate VM-exits due to Monitor-Trap Flag (MTF).
15942 *
15943 * @returns Strict VBox status code.
15944 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15945 * @thread EMT(pVCpu)
15946 */
15947VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15948{
15949 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15950 if (pVCpu->iem.s.cActiveMappings)
15951 iemMemRollback(pVCpu);
15952 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15953}
15954
15955
15956/**
15957 * Interface for HM and EM to emulate the VMREAD instruction.
15958 *
15959 * @returns Strict VBox status code.
15960 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15961 * @param pExitInfo Pointer to the VM-exit information struct.
15962 * @thread EMT(pVCpu)
15963 */
15964VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15965{
15966 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15967 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15968 Assert(pExitInfo);
15969
15970 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15971
15972 VBOXSTRICTRC rcStrict;
15973 uint8_t const cbInstr = pExitInfo->cbInstr;
15974 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15975 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15976 {
15977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15978 {
15979 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15980 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15981 }
15982 else
15983 {
15984 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15985 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15986 }
15987 }
15988 else
15989 {
15990 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15991 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15992 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15993 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15994 }
15995 Assert(!pVCpu->iem.s.cActiveMappings);
15996 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15997}
15998
15999
16000/**
16001 * Interface for HM and EM to emulate the VMWRITE instruction.
16002 *
16003 * @returns Strict VBox status code.
16004 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16005 * @param pExitInfo Pointer to the VM-exit information struct.
16006 * @thread EMT(pVCpu)
16007 */
16008VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16009{
16010 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16011 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16012 Assert(pExitInfo);
16013
16014 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16015
16016 uint64_t u64Val;
16017 uint8_t iEffSeg;
16018 IEMMODE enmEffAddrMode;
16019 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16020 {
16021 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16022 iEffSeg = UINT8_MAX;
16023 enmEffAddrMode = UINT8_MAX;
16024 }
16025 else
16026 {
16027 u64Val = pExitInfo->GCPtrEffAddr;
16028 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16029 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16030 }
16031 uint8_t const cbInstr = pExitInfo->cbInstr;
16032 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16033 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16034 Assert(!pVCpu->iem.s.cActiveMappings);
16035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16036}
16037
16038
16039/**
16040 * Interface for HM and EM to emulate the VMPTRLD instruction.
16041 *
16042 * @returns Strict VBox status code.
16043 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16044 * @param pExitInfo Pointer to the VM-exit information struct.
16045 * @thread EMT(pVCpu)
16046 */
16047VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16048{
16049 Assert(pExitInfo);
16050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16052
16053 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16054
16055 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16056 uint8_t const cbInstr = pExitInfo->cbInstr;
16057 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16058 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16059 Assert(!pVCpu->iem.s.cActiveMappings);
16060 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16061}
16062
16063
16064/**
16065 * Interface for HM and EM to emulate the VMPTRST instruction.
16066 *
16067 * @returns Strict VBox status code.
16068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16069 * @param pExitInfo Pointer to the VM-exit information struct.
16070 * @thread EMT(pVCpu)
16071 */
16072VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16073{
16074 Assert(pExitInfo);
16075 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16076 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16077
16078 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16079
16080 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16081 uint8_t const cbInstr = pExitInfo->cbInstr;
16082 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16083 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16084 Assert(!pVCpu->iem.s.cActiveMappings);
16085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16086}
16087
16088
16089/**
16090 * Interface for HM and EM to emulate the VMCLEAR instruction.
16091 *
16092 * @returns Strict VBox status code.
16093 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16094 * @param pExitInfo Pointer to the VM-exit information struct.
16095 * @thread EMT(pVCpu)
16096 */
16097VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16098{
16099 Assert(pExitInfo);
16100 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16101 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16102
16103 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16104
16105 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16106 uint8_t const cbInstr = pExitInfo->cbInstr;
16107 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16108 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16109 Assert(!pVCpu->iem.s.cActiveMappings);
16110 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16111}
16112
16113
16114/**
16115 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16116 *
16117 * @returns Strict VBox status code.
16118 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16119 * @param cbInstr The instruction length in bytes.
16120 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16121 * VMXINSTRID_VMRESUME).
16122 * @thread EMT(pVCpu)
16123 */
16124VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16125{
16126 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16127 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16128
16129 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16130 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16131 Assert(!pVCpu->iem.s.cActiveMappings);
16132 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16133}
16134
16135
16136/**
16137 * Interface for HM and EM to emulate the VMXON instruction.
16138 *
16139 * @returns Strict VBox status code.
16140 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16141 * @param pExitInfo Pointer to the VM-exit information struct.
16142 * @thread EMT(pVCpu)
16143 */
16144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16145{
16146 Assert(pExitInfo);
16147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16148 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16149
16150 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16151
16152 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16153 uint8_t const cbInstr = pExitInfo->cbInstr;
16154 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16155 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16156 Assert(!pVCpu->iem.s.cActiveMappings);
16157 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16158}
16159
16160
16161/**
16162 * Interface for HM and EM to emulate the VMXOFF instruction.
16163 *
16164 * @returns Strict VBox status code.
16165 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16166 * @param cbInstr The instruction length in bytes.
16167 * @thread EMT(pVCpu)
16168 */
16169VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16170{
16171 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16172 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16173
16174 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16175 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16176 Assert(!pVCpu->iem.s.cActiveMappings);
16177 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16178}
16179
16180
16181/**
16182 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16183 *
16184 * @remarks The @a pvUser argument is currently unused.
16185 */
16186PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16187 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16188 PGMACCESSORIGIN enmOrigin, void *pvUser)
16189{
16190 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16191
16192 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16193 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16194 {
16195 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16196 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16197
16198 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16199 * Currently they will go through as read accesses. */
16200 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16201 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16202 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16203 if (RT_FAILURE(rcStrict))
16204 return rcStrict;
16205
16206 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16207 return VINF_SUCCESS;
16208 }
16209
16210 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16211 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16212 if (RT_FAILURE(rc))
16213 return rc;
16214
16215 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16216 return VINF_PGM_HANDLER_DO_DEFAULT;
16217}
16218
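/*
 * Illustrative sketch, not part of the original source, of the return-code
 * contract of the handler above as seen by a hypothetical caller.  In
 * practice PGM invokes the handler itself through the registered physical
 * handler type; this is for reading purposes only.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = iemVmxApicAccessPageHandler(pVM, pVCpu, GCPhysFault, NULL /-*pvPhys*-/,
 *                                                         pvBuf, cbBuf, PGMACCESSTYPE_WRITE,
 *                                                         PGMACCESSORIGIN_IEM, NULL /-*pvUser*-/);
 *     if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
 *     {
 *         // Not in VMX non-root mode; the handler deregistered itself and the
 *         // caller must perform the access against normal memory.
 *     }
 *     else if (rcStrict == VINF_SUCCESS)
 *     {
 *         // The access was handled (virtualized or turned into a VM-exit) by
 *         // IEM; the caller must not touch memory.
 *     }
 * @endcode
 */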
16219#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16220
16221#ifdef IN_RING3
16222
16223/**
16224 * Handles the unlikely and probably fatal merge cases.
16225 *
16226 * @returns Merged status code.
16227 * @param rcStrict Current EM status code.
16228 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16229 * with @a rcStrict.
16230 * @param iMemMap The memory mapping index. For error reporting only.
16231 * @param pVCpu The cross context virtual CPU structure of the calling
16232 * thread, for error reporting only.
16233 */
16234DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16235 unsigned iMemMap, PVMCPU pVCpu)
16236{
16237 if (RT_FAILURE_NP(rcStrict))
16238 return rcStrict;
16239
16240 if (RT_FAILURE_NP(rcStrictCommit))
16241 return rcStrictCommit;
16242
16243 if (rcStrict == rcStrictCommit)
16244 return rcStrictCommit;
16245
16246 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16247 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16248 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16249 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16250 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16251 return VERR_IOM_FF_STATUS_IPE;
16252}
16253
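/*
 * Worked examples, not part of the original source, for the slow merge above;
 * the status codes are arbitrary picks for illustration.
 *
 * @code
 *     iemR3MergeStatusSlow(VERR_ACCESS_DENIED, VINF_EM_RESET,  0, pVCpu); // -> VERR_ACCESS_DENIED (IEM failure wins)
 *     iemR3MergeStatusSlow(VINF_EM_RESET,      VERR_NO_MEMORY, 0, pVCpu); // -> VERR_NO_MEMORY (commit failure is next)
 *     iemR3MergeStatusSlow(VINF_EM_RESET,      VINF_EM_RESET,  0, pVCpu); // -> VINF_EM_RESET (identical codes)
 * @endcode
 *
 * Two different non-failure codes reaching this function trigger the release
 * assertion and yield VERR_IOM_FF_STATUS_IPE.
 */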
16254
16255/**
16256 * Helper for IEMR3ProcessForceFlag.
16257 *
16258 * @returns Merged status code.
16259 * @param rcStrict Current EM status code.
16260 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16261 * with @a rcStrict.
16262 * @param iMemMap The memory mapping index. For error reporting only.
16263 * @param pVCpu The cross context virtual CPU structure of the calling
16264 * thread, for error reporting only.
16265 */
16266DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16267{
16268 /* Simple. */
16269 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16270 return rcStrictCommit;
16271
16272 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16273 return rcStrict;
16274
16275 /* EM scheduling status codes. */
16276 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16277 && rcStrict <= VINF_EM_LAST))
16278 {
16279 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16280 && rcStrictCommit <= VINF_EM_LAST))
16281 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16282 }
16283
16284 /* Unlikely */
16285 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16286}
16287
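/*
 * Worked examples, not part of the original source, for the common merge
 * paths above; the EM scheduling codes are arbitrary picks for illustration.
 *
 * @code
 *     iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RAW_TO_R3, 0, pVCpu); // -> VINF_EM_RAW_TO_R3 (take the commit status)
 *     iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,      0, pVCpu); // -> VINF_SUCCESS (already in ring-3)
 *     iemR3MergeStatus(VINF_EM_HALT,      VINF_EM_RESET,     0, pVCpu); // -> whichever EM code has the lower
 *                                                                       //    (i.e. higher priority) value
 * @endcode
 */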
16288
16289/**
16290 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16291 *
16292 * @returns Merge between @a rcStrict and what the commit operation returned.
16293 * @param pVM The cross context VM structure.
16294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16295 * @param rcStrict The status code returned by ring-0 or raw-mode.
16296 */
16297VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16298{
16299 /*
16300 * Reset the pending commit.
16301 */
16302 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16303 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16304 ("%#x %#x %#x\n",
16305 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16306 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16307
16308 /*
16309 * Commit the pending bounce buffers (usually just one).
16310 */
16311 unsigned cBufs = 0;
16312 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16313 while (iMemMap-- > 0)
16314 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16315 {
16316 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16317 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16318 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16319
16320 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16321 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16322 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16323
16324 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16325 {
16326 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16327 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16328 pbBuf,
16329 cbFirst,
16330 PGMACCESSORIGIN_IEM);
16331 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16332 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16333 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16334 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16335 }
16336
16337 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16338 {
16339 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16341 pbBuf + cbFirst,
16342 cbSecond,
16343 PGMACCESSORIGIN_IEM);
16344 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16345 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16346 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16347 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16348 }
16349 cBufs++;
16350 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16351 }
16352
16353 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16354 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16355 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16356 pVCpu->iem.s.cActiveMappings = 0;
16357 return rcStrict;
16358}
16359
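/*
 * Illustrative caller sketch, not part of the original source: how ring-3
 * force-flag processing might hand a pending IEM write commit to the
 * function above.  The force-flag test macro and the surrounding variables
 * are assumptions for illustration.
 *
 * @code
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */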
16360#endif /* IN_RING3 */
16361