VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 78402

Last change on this file since 78402 was 78237, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Remove some superfluous VM-exit handlers that can be handled by merely passing the VM-exit reason. Preparation of ring-0 VM-exit handling.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 646.6 KB
1/* $Id: IEMAll.cpp 78237 2019-04-22 04:35:20Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
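/* Illustrative sketch (not part of the original source; iemOp_example is a made-up
 * name): with the definitions above, a decoder function is declared through the
 * macro rather than by hand, so the calling convention and throw-specification stay
 * in one place. On a plain GCC build, for instance,
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         return VINF_SUCCESS;
 *     }
 *
 * expands to roughly:
 *
 *     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) iemOp_example(PVMCPU pVCpu)
 *     {
 *         return VINF_SUCCESS;
 *     }
 */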
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
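/* Rough illustration of the two modes (an assumption, not code from this file):
 * without IEM_WITH_SETJMP every helper returns a VBOXSTRICTRC that the caller must
 * check and propagate, e.g.
 *
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 * whereas with IEM_WITH_SETJMP the outermost caller does one setjmp() and failing
 * helpers longjmp() back to it, so the per-call checks disappear and fetched values
 * can be returned in registers instead of through output parameters.
 */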
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
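/* Usage sketch (illustrative, not taken from this file): the macro supplies the
 * unreachable default case of a fully-covered switch so GCC can see that every path
 * assigns the variable:
 *
 *     uint32_t cbOperand;
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbOperand = 2; break;
 *         case IEMMODE_32BIT: cbOperand = 4; break;
 *         case IEMMODE_64BIT: cbOperand = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */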
247
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
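/* Illustrative dispatch sketch (an assumption, not lifted from this file): inside a
 * decoder that has already fetched an opcode byte b and a ModR/M byte bRm, the call
 * macros are used like this so the hidden pVCpu argument can be renamed or extended
 * in one place:
 *
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);       // plain opcode dispatch
 *     return FNIEMOP_CALL_1(iemOp_GrpExample, bRm);   // handler taking the ModR/M byte
 *
 * iemOp_GrpExample is named here only as an example of a one-argument decoder.
 */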
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
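/* Usage sketch (assumption, for illustration only): an instruction emulation that is
 * intercepted in VMX non-root operation typically bails out through these wrappers,
 * e.g. for CPUID:
 *
 *     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
 *         IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
 *
 * where cbInstr is the instruction length; whether an instruction uses this or the
 * "needs info" variant depends on the exit reason.
 */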
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles the SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
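/* Usage sketch (assumption; the constants are taken from the SVM headers): an
 * instruction emulation checks its control intercept before doing any work, e.g.
 * for RDTSC:
 *
 *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 *
 * If the intercept bit is set in the guest VMCB, this updates the NRIP (when the
 * NextRIP-save feature is exposed) and raises the nested \#VMEXIT.
 */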
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
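/* Dispatch sketch (illustrative; the IEMOPBINSIZES member names below are an
 * assumption, they are not shown in this file): the group-1 decoder indexes this
 * table with the ModR/M reg field, and the common binary-op helper then picks the
 * operand width and the locked/unlocked worker, roughly:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *     pfnWorker = fLockPrefix ? pImpl->pfnLockedU32 : pImpl->pfnNormalU32;  // 32-bit case
 */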
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmi(PVMCPU pVCpu);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason);
989IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
991IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
993#endif
994
995#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
996IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
997IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
998#endif
999
1000
1001/**
1002 * Sets the pass up status.
1003 *
1004 * @returns VINF_SUCCESS.
1005 * @param pVCpu The cross context virtual CPU structure of the
1006 * calling thread.
1007 * @param rcPassUp The pass up status. Must be informational.
1008 * VINF_SUCCESS is not allowed.
1009 */
1010IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1011{
1012 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1013
1014 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1015 if (rcOldPassUp == VINF_SUCCESS)
1016 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1017 /* If both are EM scheduling codes, use EM priority rules. */
1018 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1019 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1020 {
1021 if (rcPassUp < rcOldPassUp)
1022 {
1023 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1024 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1025 }
1026 else
1027 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1028 }
1029 /* Override EM scheduling with specific status code. */
1030 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1031 {
1032 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1033 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1034 }
1035 /* Don't override specific status code, first come first served. */
1036 else
1037 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1038 return VINF_SUCCESS;
1039}
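/* Worked example of the priority rules above (illustrative): if rcOldPassUp is an EM
 * scheduling status and a new EM status with a numerically lower value arrives, the
 * lower value wins, since EM treats lower VINF_EM_* values as more urgent. A non-EM
 * informational status, once stored, is never replaced: first come, first served. */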
1040
1041
1042/**
1043 * Calculates the CPU mode.
1044 *
1045 * This is mainly for updating IEMCPU::enmCpuMode.
1046 *
1047 * @returns CPU mode.
1048 * @param pVCpu The cross context virtual CPU structure of the
1049 * calling thread.
1050 */
1051DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1052{
1053 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1054 return IEMMODE_64BIT;
1055 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1056 return IEMMODE_32BIT;
1057 return IEMMODE_16BIT;
1058}
1059
1060
1061/**
1062 * Initializes the execution state.
1063 *
1064 * @param pVCpu The cross context virtual CPU structure of the
1065 * calling thread.
1066 * @param fBypassHandlers Whether to bypass access handlers.
1067 *
1068 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1069 * side-effects in strict builds.
1070 */
1071DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1072{
1073 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1074 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1075
1076#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1085#endif
1086
1087#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1088 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1089#endif
1090 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1091 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1092#ifdef VBOX_STRICT
1093 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1094 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1095 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1096 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1097 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1098 pVCpu->iem.s.uRexReg = 127;
1099 pVCpu->iem.s.uRexB = 127;
1100 pVCpu->iem.s.offModRm = 127;
1101 pVCpu->iem.s.uRexIndex = 127;
1102 pVCpu->iem.s.iEffSeg = 127;
1103 pVCpu->iem.s.idxPrefix = 127;
1104 pVCpu->iem.s.uVex3rdReg = 127;
1105 pVCpu->iem.s.uVexLength = 127;
1106 pVCpu->iem.s.fEvexStuff = 127;
1107 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1108# ifdef IEM_WITH_CODE_TLB
1109 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1110 pVCpu->iem.s.pbInstrBuf = NULL;
1111 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1112 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1113 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1114 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1115# else
1116 pVCpu->iem.s.offOpcode = 127;
1117 pVCpu->iem.s.cbOpcode = 127;
1118# endif
1119#endif
1120
1121 pVCpu->iem.s.cActiveMappings = 0;
1122 pVCpu->iem.s.iNextMapping = 0;
1123 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1124 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1125#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1126 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1127 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1128 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1129 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1130 if (!pVCpu->iem.s.fInPatchCode)
1131 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1132#endif
1133}
1134
1135#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1136/**
1137 * Performs a minimal reinitialization of the execution state.
1138 *
1139 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1140 * 'world-switch' type operations on the CPU. Currently only nested
1141 * hardware-virtualization uses it.
1142 *
1143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1144 */
1145IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1146{
1147 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1148 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1149
1150 pVCpu->iem.s.uCpl = uCpl;
1151 pVCpu->iem.s.enmCpuMode = enmMode;
1152 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1153 pVCpu->iem.s.enmEffAddrMode = enmMode;
1154 if (enmMode != IEMMODE_64BIT)
1155 {
1156 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1157 pVCpu->iem.s.enmEffOpSize = enmMode;
1158 }
1159 else
1160 {
1161 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifndef IEM_WITH_CODE_TLB
1166 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1167 pVCpu->iem.s.offOpcode = 0;
1168 pVCpu->iem.s.cbOpcode = 0;
1169#endif
1170 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1171}
1172#endif
1173
1174/**
1175 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1176 *
1177 * @param pVCpu The cross context virtual CPU structure of the
1178 * calling thread.
1179 */
1180DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1181{
1182 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1183#ifdef VBOX_STRICT
1184# ifdef IEM_WITH_CODE_TLB
1185 NOREF(pVCpu);
1186# else
1187 pVCpu->iem.s.cbOpcode = 0;
1188# endif
1189#else
1190 NOREF(pVCpu);
1191#endif
1192}
1193
1194
1195/**
1196 * Initializes the decoder state.
1197 *
1198 * iemReInitDecoder is mostly a copy of this function.
1199 *
1200 * @param pVCpu The cross context virtual CPU structure of the
1201 * calling thread.
1202 * @param fBypassHandlers Whether to bypass access handlers.
1203 */
1204DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1205{
1206 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1207 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1208
1209#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1218#endif
1219
1220#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1221 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1222#endif
1223 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1224 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1225 pVCpu->iem.s.enmCpuMode = enmMode;
1226 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1227 pVCpu->iem.s.enmEffAddrMode = enmMode;
1228 if (enmMode != IEMMODE_64BIT)
1229 {
1230 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1231 pVCpu->iem.s.enmEffOpSize = enmMode;
1232 }
1233 else
1234 {
1235 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1236 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1237 }
1238 pVCpu->iem.s.fPrefixes = 0;
1239 pVCpu->iem.s.uRexReg = 0;
1240 pVCpu->iem.s.uRexB = 0;
1241 pVCpu->iem.s.uRexIndex = 0;
1242 pVCpu->iem.s.idxPrefix = 0;
1243 pVCpu->iem.s.uVex3rdReg = 0;
1244 pVCpu->iem.s.uVexLength = 0;
1245 pVCpu->iem.s.fEvexStuff = 0;
1246 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1247#ifdef IEM_WITH_CODE_TLB
1248 pVCpu->iem.s.pbInstrBuf = NULL;
1249 pVCpu->iem.s.offInstrNextByte = 0;
1250 pVCpu->iem.s.offCurInstrStart = 0;
1251# ifdef VBOX_STRICT
1252 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1253 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1254 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1255# endif
1256#else
1257 pVCpu->iem.s.offOpcode = 0;
1258 pVCpu->iem.s.cbOpcode = 0;
1259#endif
1260 pVCpu->iem.s.offModRm = 0;
1261 pVCpu->iem.s.cActiveMappings = 0;
1262 pVCpu->iem.s.iNextMapping = 0;
1263 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1264 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1265#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1266 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1267 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1268 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1269 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1270 if (!pVCpu->iem.s.fInPatchCode)
1271 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1272#endif
1273
1274#ifdef DBGFTRACE_ENABLED
1275 switch (enmMode)
1276 {
1277 case IEMMODE_64BIT:
1278 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1279 break;
1280 case IEMMODE_32BIT:
1281 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1282 break;
1283 case IEMMODE_16BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1285 break;
1286 }
1287#endif
1288}
1289
1290
1291/**
1292 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1293 *
1294 * This is mostly a copy of iemInitDecoder.
1295 *
1296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1297 */
1298DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1299{
1300 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1301
1302#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1311#endif
1312
1313 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1314 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1315 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1316 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1317 pVCpu->iem.s.enmEffAddrMode = enmMode;
1318 if (enmMode != IEMMODE_64BIT)
1319 {
1320 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1321 pVCpu->iem.s.enmEffOpSize = enmMode;
1322 }
1323 else
1324 {
1325 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1326 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1327 }
1328 pVCpu->iem.s.fPrefixes = 0;
1329 pVCpu->iem.s.uRexReg = 0;
1330 pVCpu->iem.s.uRexB = 0;
1331 pVCpu->iem.s.uRexIndex = 0;
1332 pVCpu->iem.s.idxPrefix = 0;
1333 pVCpu->iem.s.uVex3rdReg = 0;
1334 pVCpu->iem.s.uVexLength = 0;
1335 pVCpu->iem.s.fEvexStuff = 0;
1336 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1337#ifdef IEM_WITH_CODE_TLB
1338 if (pVCpu->iem.s.pbInstrBuf)
1339 {
1340 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1341 - pVCpu->iem.s.uInstrBufPc;
1342 if (off < pVCpu->iem.s.cbInstrBufTotal)
1343 {
1344 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1345 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1346 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1347 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1348 else
1349 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1350 }
1351 else
1352 {
1353 pVCpu->iem.s.pbInstrBuf = NULL;
1354 pVCpu->iem.s.offInstrNextByte = 0;
1355 pVCpu->iem.s.offCurInstrStart = 0;
1356 pVCpu->iem.s.cbInstrBuf = 0;
1357 pVCpu->iem.s.cbInstrBufTotal = 0;
1358 }
1359 }
1360 else
1361 {
1362 pVCpu->iem.s.offInstrNextByte = 0;
1363 pVCpu->iem.s.offCurInstrStart = 0;
1364 pVCpu->iem.s.cbInstrBuf = 0;
1365 pVCpu->iem.s.cbInstrBufTotal = 0;
1366 }
1367#else
1368 pVCpu->iem.s.cbOpcode = 0;
1369 pVCpu->iem.s.offOpcode = 0;
1370#endif
1371 pVCpu->iem.s.offModRm = 0;
1372 Assert(pVCpu->iem.s.cActiveMappings == 0);
1373 pVCpu->iem.s.iNextMapping = 0;
1374 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1375 Assert(pVCpu->iem.s.fBypassHandlers == false);
1376#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1377 if (!pVCpu->iem.s.fInPatchCode)
1378 { /* likely */ }
1379 else
1380 {
1381 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1382 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1383 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1384 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1385 if (!pVCpu->iem.s.fInPatchCode)
1386 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1387 }
1388#endif
1389
1390#ifdef DBGFTRACE_ENABLED
1391 switch (enmMode)
1392 {
1393 case IEMMODE_64BIT:
1394 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1395 break;
1396 case IEMMODE_32BIT:
1397 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1398 break;
1399 case IEMMODE_16BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1401 break;
1402 }
1403#endif
1404}
1405
1406
1407
1408/**
1409 * Prefetches opcodes the first time, when starting execution.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pVCpu The cross context virtual CPU structure of the
1413 * calling thread.
1414 * @param fBypassHandlers Whether to bypass access handlers.
1415 */
1416IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1417{
1418 iemInitDecoder(pVCpu, fBypassHandlers);
1419
1420#ifdef IEM_WITH_CODE_TLB
1421 /** @todo Do ITLB lookup here. */
1422
1423#else /* !IEM_WITH_CODE_TLB */
1424
1425 /*
1426 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1427 *
1428 * First translate CS:rIP to a physical address.
1429 */
1430 uint32_t cbToTryRead;
1431 RTGCPTR GCPtrPC;
1432 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1433 {
1434 cbToTryRead = PAGE_SIZE;
1435 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1436 if (IEM_IS_CANONICAL(GCPtrPC))
1437 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1438 else
1439 return iemRaiseGeneralProtectionFault0(pVCpu);
1440 }
1441 else
1442 {
1443 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1444 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1445 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1446 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1447 else
1448 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1449 if (cbToTryRead) { /* likely */ }
1450 else /* overflowed */
1451 {
1452 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1453 cbToTryRead = UINT32_MAX;
1454 }
1455 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1456 Assert(GCPtrPC <= UINT32_MAX);
1457 }
1458
1459# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1460 /* Allow interpretation of patch manager code blocks since they can for
1461 instance throw #PFs for perfectly good reasons. */
1462 if (pVCpu->iem.s.fInPatchCode)
1463 {
1464 size_t cbRead = 0;
1465 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1466 AssertRCReturn(rc, rc);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1468 return VINF_SUCCESS;
1469 }
1470# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1471
1472 RTGCPHYS GCPhys;
1473 uint64_t fFlags;
1474 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1475 if (RT_SUCCESS(rc)) { /* probable */ }
1476 else
1477 {
1478 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1479 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1480 }
1481 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1485 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1486 }
1487 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1491 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1492 }
1493 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1494 /** @todo Check reserved bits and such stuff. PGM is better at doing
1495 * that, so do it when implementing the guest virtual address
1496 * TLB... */
1497
1498 /*
1499 * Read the bytes at this address.
1500 */
1501 PVM pVM = pVCpu->CTX_SUFF(pVM);
1502# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1503 size_t cbActual;
1504 if ( PATMIsEnabled(pVM)
1505 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1506 {
1507 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1508 Assert(cbActual > 0);
1509 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1510 }
1511 else
1512# endif
1513 {
1514 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1515 if (cbToTryRead > cbLeftOnPage)
1516 cbToTryRead = cbLeftOnPage;
1517 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1518 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1519
1520 if (!pVCpu->iem.s.fBypassHandlers)
1521 {
1522 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1523 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1524 { /* likely */ }
1525 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1526 {
1527 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1528                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1529 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1530 }
1531 else
1532 {
1533 Log((RT_SUCCESS(rcStrict)
1534 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1535 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1536                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1537 return rcStrict;
1538 }
1539 }
1540 else
1541 {
1542 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1543 if (RT_SUCCESS(rc))
1544 { /* likely */ }
1545 else
1546 {
1547 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1548                      GCPtrPC, GCPhys, cbToTryRead, rc));
1549 return rc;
1550 }
1551 }
1552 pVCpu->iem.s.cbOpcode = cbToTryRead;
1553 }
1554#endif /* !IEM_WITH_CODE_TLB */
1555 return VINF_SUCCESS;
1556}
1557
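/* Note! A minimal sketch of the read-size clamping done by the prefetch above:
 * the number of bytes to try reading is limited first by the current guest
 * page and then by the size of the opcode buffer, before any access handlers
 * or physical reads come into play.  Using the IPRT RT_MIN macro this is
 * equivalent to:
 *
 * @code
 *      uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
 *      cbToTryRead = RT_MIN(cbToTryRead, cbLeftOnPage);                    // stay within the page
 *      cbToTryRead = RT_MIN(cbToTryRead, sizeof(pVCpu->iem.s.abOpcode));   // stay within abOpcode[]
 * @endcode
 */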
1558
1559/**
1560 * Invalidates the IEM TLBs.
1561 *
1562 * This is called internally as well as by PGM when moving GC mappings.
1563 *
1565 * @param pVCpu The cross context virtual CPU structure of the calling
1566 * thread.
1567 * @param fVmm Set when PGM calls us with a remapping.
1568 */
1569VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1570{
1571#ifdef IEM_WITH_CODE_TLB
1572 pVCpu->iem.s.cbInstrBufTotal = 0;
1573 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1574 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1575 { /* very likely */ }
1576 else
1577 {
1578 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1579 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1580 while (i-- > 0)
1581 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1582 }
1583#endif
1584
1585#ifdef IEM_WITH_DATA_TLB
1586 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1587 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1588 { /* very likely */ }
1589 else
1590 {
1591 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1592 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1593 while (i-- > 0)
1594 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1595 }
1596#endif
1597 NOREF(pVCpu); NOREF(fVmm);
1598}
1599
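/* Note! A minimal sketch of the revision trick used above: a TLB entry only
 * hits while its tag carries the current revision, so bumping the revision
 * lazily invalidates every entry without touching the array; the array is
 * only scrubbed on the rare revision wrap-around handled above.  The lookup
 * side (see iemOpcodeFetchBytesJmp below) boils down to:
 *
 * @code
 *      uint64_t     uTag  = (GCPtrPage >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *      PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *      bool         fHit  = pTlbe->uTag == uTag;   // entries tagged with a stale revision can never match
 * @endcode
 *
 * GCPtrPage is just an illustrative guest-virtual address here.
 */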
1600
1601/**
1602 * Invalidates a page in the TLBs.
1603 *
1604 * @param pVCpu The cross context virtual CPU structure of the calling
1605 * thread.
1606 * @param GCPtr The address of the page to invalidate
1607 */
1608VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1609{
1610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1611 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1612 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1613 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1614 uintptr_t idx = (uint8_t)GCPtr;
1615
1616# ifdef IEM_WITH_CODE_TLB
1617 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1618 {
1619 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1620 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1621 pVCpu->iem.s.cbInstrBufTotal = 0;
1622 }
1623# endif
1624
1625# ifdef IEM_WITH_DATA_TLB
1626 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1627 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1628# endif
1629#else
1630 NOREF(pVCpu); NOREF(GCPtr);
1631#endif
1632}
1633
1634
1635/**
1636 * Invalidates the host physical aspects of the IEM TLBs.
1637 *
1638 * This is called internally as well as by PGM when moving GC mappings.
1639 *
1640 * @param pVCpu The cross context virtual CPU structure of the calling
1641 * thread.
1642 */
1643VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1644{
1645#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1646    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1647
1648# ifdef IEM_WITH_CODE_TLB
1649 pVCpu->iem.s.cbInstrBufTotal = 0;
1650# endif
1651 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1652 if (uTlbPhysRev != 0)
1653 {
1654 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1655 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1656 }
1657 else
1658 {
1659 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1660 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1661
1662 unsigned i;
1663# ifdef IEM_WITH_CODE_TLB
1664 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1665 while (i-- > 0)
1666 {
1667 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1668 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1669 }
1670# endif
1671# ifdef IEM_WITH_DATA_TLB
1672 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1673 while (i-- > 0)
1674 {
1675 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1676 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1677 }
1678# endif
1679 }
1680#else
1681 NOREF(pVCpu);
1682#endif
1683}
1684
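/* Note! The physical revision works like the virtual one above: the
 * IEMTLBE_F_PHYS_REV bits of an entry's fFlagsAndPhysRev must equal the
 * current uTlbPhysRev before its pbMappingR3 pointer may be used, otherwise
 * the mapping is re-resolved via PGMPhysIemGCPhys2PtrNoLock (see the code TLB
 * fetch path below).  A minimal sketch of the check:
 *
 * @code
 *      bool fPhysInfoValid = (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
 *                         == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
 * @endcode
 */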
1685
1686/**
1687 * Invalidates the host physical aspects of the IEM TLBs for all CPUs.
1688 *
1689 * This is called internally as well as by PGM when moving GC mappings.
1690 *
1691 * @param pVM The cross context VM structure.
1692 *
1693 * @remarks Caller holds the PGM lock.
1694 */
1695VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1696{
1697 RT_NOREF_PV(pVM);
1698}
1699
1700#ifdef IEM_WITH_CODE_TLB
1701
1702/**
1703 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1704 * and longjmp'ing on failure.
1705 *
1706 * We end up here for a number of reasons:
1707 * - pbInstrBuf isn't yet initialized.
1708 *      - Advancing beyond the buffer boundary (e.g. cross page).
1709 * - Advancing beyond the CS segment limit.
1710 * - Fetching from non-mappable page (e.g. MMIO).
1711 *
1712 * @param pVCpu The cross context virtual CPU structure of the
1713 * calling thread.
1714 * @param pvDst Where to return the bytes.
1715 * @param cbDst Number of bytes to read.
1716 *
1717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1718 */
1719IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1720{
1721#ifdef IN_RING3
1722 for (;;)
1723 {
1724 Assert(cbDst <= 8);
1725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1726
1727 /*
1728 * We might have a partial buffer match, deal with that first to make the
1729 * rest simpler. This is the first part of the cross page/buffer case.
1730 */
1731 if (pVCpu->iem.s.pbInstrBuf != NULL)
1732 {
1733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1734 {
1735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1738
1739 cbDst -= cbCopy;
1740 pvDst = (uint8_t *)pvDst + cbCopy;
1741 offBuf += cbCopy;
1742                pVCpu->iem.s.offInstrNextByte = offBuf;
1743 }
1744 }
1745
1746 /*
1747 * Check segment limit, figuring how much we're allowed to access at this point.
1748 *
1749 * We will fault immediately if RIP is past the segment limit / in non-canonical
1750 * territory. If we do continue, there are one or more bytes to read before we
1751 * end up in trouble and we need to do that first before faulting.
1752 */
1753 RTGCPTR GCPtrFirst;
1754 uint32_t cbMaxRead;
1755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1756 {
1757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1759 { /* likely */ }
1760 else
1761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1763 }
1764 else
1765 {
1766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1767 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1769 { /* likely */ }
1770 else
1771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1773 if (cbMaxRead != 0)
1774 { /* likely */ }
1775 else
1776 {
1777 /* Overflowed because address is 0 and limit is max. */
1778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1779 cbMaxRead = X86_PAGE_SIZE;
1780 }
1781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1783 if (cbMaxRead2 < cbMaxRead)
1784 cbMaxRead = cbMaxRead2;
1785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1786 }
1787
1788 /*
1789 * Get the TLB entry for this piece of code.
1790 */
1791 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1792 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1793 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1794 if (pTlbe->uTag == uTag)
1795 {
1796 /* likely when executing lots of code, otherwise unlikely */
1797# ifdef VBOX_WITH_STATISTICS
1798 pVCpu->iem.s.CodeTlb.cTlbHits++;
1799# endif
1800 }
1801 else
1802 {
1803 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1804# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1805 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1806 {
1807 pTlbe->uTag = uTag;
1808 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1809                                    | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1810 pTlbe->GCPhys = NIL_RTGCPHYS;
1811 pTlbe->pbMappingR3 = NULL;
1812 }
1813 else
1814# endif
1815 {
1816 RTGCPHYS GCPhys;
1817 uint64_t fFlags;
1818 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1819 if (RT_FAILURE(rc))
1820 {
1821 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1822 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1823 }
1824
1825 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1826 pTlbe->uTag = uTag;
1827 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1828 pTlbe->GCPhys = GCPhys;
1829 pTlbe->pbMappingR3 = NULL;
1830 }
1831 }
1832
1833 /*
1834 * Check TLB page table level access flags.
1835 */
1836 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1837 {
1838 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1839 {
1840 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1841 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1842 }
1843 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1844 {
1845 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1846 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1847 }
1848 }
1849
1850# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1851 /*
1852 * Allow interpretation of patch manager code blocks since they can for
1853 * instance throw #PFs for perfectly good reasons.
1854 */
1855 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1856        { /* not unlikely */ }
1857 else
1858 {
1859            /** @todo This could be optimized a little in ring-3 if we liked. */
1860 size_t cbRead = 0;
1861 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1862 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1863 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1864 return;
1865 }
1866# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1867
1868 /*
1869 * Look up the physical page info if necessary.
1870 */
1871 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1872 { /* not necessary */ }
1873 else
1874 {
1875 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1876 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1877 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1878 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1879 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1880 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1881 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1882 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1883 }
1884
1885# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1886 /*
1887 * Try do a direct read using the pbMappingR3 pointer.
1888 */
1889 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1890 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1891 {
1892 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1893 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1894 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1895 {
1896 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1897 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1898 }
1899 else
1900 {
1901 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1902 Assert(cbInstr < cbMaxRead);
1903 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1904 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1905 }
1906 if (cbDst <= cbMaxRead)
1907 {
1908 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1909 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1910 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1911 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1912 return;
1913 }
1914 pVCpu->iem.s.pbInstrBuf = NULL;
1915
1916 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1917 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1918 }
1919 else
1920# endif
1921#if 0
1922 /*
1923     * If there is no special read handling, we can read a bit more and
1924 * put it in the prefetch buffer.
1925 */
1926 if ( cbDst < cbMaxRead
1927 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1928 {
1929 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1930 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1931 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1932 { /* likely */ }
1933 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1934 {
1935 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1936 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1937 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1938                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1939 }
1940 else
1941 {
1942 Log((RT_SUCCESS(rcStrict)
1943 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1944 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1945 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1947 }
1948 }
1949 /*
1950 * Special read handling, so only read exactly what's needed.
1951 * This is a highly unlikely scenario.
1952 */
1953 else
1954#endif
1955 {
1956 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1957 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1958 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1959 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1960 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1961 { /* likely */ }
1962 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1963 {
1964 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1965                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1966 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1967 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1968 }
1969 else
1970 {
1971 Log((RT_SUCCESS(rcStrict)
1972 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1973 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1974                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1975 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1976 }
1977 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1978 if (cbToRead == cbDst)
1979 return;
1980 }
1981
1982 /*
1983 * More to read, loop.
1984 */
1985 cbDst -= cbMaxRead;
1986 pvDst = (uint8_t *)pvDst + cbMaxRead;
1987 }
1988#else
1989 RT_NOREF(pvDst, cbDst);
1990 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1991#endif
1992}
1993
1994#else
1995
1996/**
1997 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1998 * exception if it fails.
1999 *
2000 * @returns Strict VBox status code.
2001 * @param pVCpu The cross context virtual CPU structure of the
2002 * calling thread.
2003 * @param   cbMin               The minimum number of bytes relative to offOpcode
2004 *                              that must be read.
2005 */
2006IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2007{
2008 /*
2009 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2010 *
2011 * First translate CS:rIP to a physical address.
2012 */
2013 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2014 uint32_t cbToTryRead;
2015 RTGCPTR GCPtrNext;
2016 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2017 {
2018 cbToTryRead = PAGE_SIZE;
2019 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2020 if (!IEM_IS_CANONICAL(GCPtrNext))
2021 return iemRaiseGeneralProtectionFault0(pVCpu);
2022 }
2023 else
2024 {
2025 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2026 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2027 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2028 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2029 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2030 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2031 if (!cbToTryRead) /* overflowed */
2032 {
2033 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2034 cbToTryRead = UINT32_MAX;
2035 /** @todo check out wrapping around the code segment. */
2036 }
2037 if (cbToTryRead < cbMin - cbLeft)
2038 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2039 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2040 }
2041
2042 /* Only read up to the end of the page, and make sure we don't read more
2043 than the opcode buffer can hold. */
2044 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2045 if (cbToTryRead > cbLeftOnPage)
2046 cbToTryRead = cbLeftOnPage;
2047 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2048 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2049/** @todo r=bird: Convert assertion into undefined opcode exception? */
2050 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2051
2052# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2053 /* Allow interpretation of patch manager code blocks since they can for
2054 instance throw #PFs for perfectly good reasons. */
2055 if (pVCpu->iem.s.fInPatchCode)
2056 {
2057 size_t cbRead = 0;
2058 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2059 AssertRCReturn(rc, rc);
2060 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2061 return VINF_SUCCESS;
2062 }
2063# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2064
2065 RTGCPHYS GCPhys;
2066 uint64_t fFlags;
2067 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2068 if (RT_FAILURE(rc))
2069 {
2070 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2071 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2072 }
2073 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2074 {
2075 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2076 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2077 }
2078 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2079 {
2080 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2081 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2082 }
2083 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2084 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2085 /** @todo Check reserved bits and such stuff. PGM is better at doing
2086 * that, so do it when implementing the guest virtual address
2087 * TLB... */
2088
2089 /*
2090 * Read the bytes at this address.
2091 *
2092 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2093 * and since PATM should only patch the start of an instruction there
2094 * should be no need to check again here.
2095 */
2096 if (!pVCpu->iem.s.fBypassHandlers)
2097 {
2098 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2099 cbToTryRead, PGMACCESSORIGIN_IEM);
2100 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2101 { /* likely */ }
2102 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2103 {
2104 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2105                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2106 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2107 }
2108 else
2109 {
2110 Log((RT_SUCCESS(rcStrict)
2111 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2112 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2113                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2114 return rcStrict;
2115 }
2116 }
2117 else
2118 {
2119 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2120 if (RT_SUCCESS(rc))
2121 { /* likely */ }
2122 else
2123 {
2124 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2125 return rc;
2126 }
2127 }
2128 pVCpu->iem.s.cbOpcode += cbToTryRead;
2129 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2130
2131 return VINF_SUCCESS;
2132}
2133
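/* Note! A small worked example of the clamping above, assuming a code segment
 * with base 0 and limit 0x1fff, EIP 0x1ffa and 3 opcode bytes already buffered:
 *
 * @code
 *      GCPtrNext32  = 0x1ffa + 3;                                  // 0x1ffd
 *      cbToTryRead  = 0x1fff - 0x1ffd + 1;                         // 3 bytes left before the limit
 *      cbLeftOnPage = PAGE_SIZE - (0x1ffd & PAGE_OFFSET_MASK);     // 3 bytes left on the page
 *      cbToTryRead  = RT_MIN(cbToTryRead, cbLeftOnPage);           // still 3
 *      cbToTryRead  = RT_MIN(cbToTryRead, sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode);
 * @endcode
 *
 * The limit+1 overflow case (GCPtrNext32 == 0 with a 4 GiB limit) is what the
 * UINT32_MAX fallback above covers.
 */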
2134#endif /* !IEM_WITH_CODE_TLB */
2135#ifndef IEM_WITH_SETJMP
2136
2137/**
2138 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2139 *
2140 * @returns Strict VBox status code.
2141 * @param pVCpu The cross context virtual CPU structure of the
2142 * calling thread.
2143 * @param pb Where to return the opcode byte.
2144 */
2145DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2146{
2147 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2148 if (rcStrict == VINF_SUCCESS)
2149 {
2150 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2151 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2152 pVCpu->iem.s.offOpcode = offOpcode + 1;
2153 }
2154 else
2155 *pb = 0;
2156 return rcStrict;
2157}
2158
2159
2160/**
2161 * Fetches the next opcode byte.
2162 *
2163 * @returns Strict VBox status code.
2164 * @param pVCpu The cross context virtual CPU structure of the
2165 * calling thread.
2166 * @param pu8 Where to return the opcode byte.
2167 */
2168DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2169{
2170 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2171 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2172 {
2173 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2174 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2175 return VINF_SUCCESS;
2176 }
2177 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2178}
2179
2180#else /* IEM_WITH_SETJMP */
2181
2182/**
2183 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2184 *
2185 * @returns The opcode byte.
2186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2187 */
2188DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2189{
2190# ifdef IEM_WITH_CODE_TLB
2191 uint8_t u8;
2192 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2193 return u8;
2194# else
2195 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2196 if (rcStrict == VINF_SUCCESS)
2197 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2198 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2199# endif
2200}
2201
2202
2203/**
2204 * Fetches the next opcode byte, longjmp on error.
2205 *
2206 * @returns The opcode byte.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 */
2209DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2210{
2211# ifdef IEM_WITH_CODE_TLB
2212 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2213 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2214 if (RT_LIKELY( pbBuf != NULL
2215 && offBuf < pVCpu->iem.s.cbInstrBuf))
2216 {
2217 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2218 return pbBuf[offBuf];
2219 }
2220# else
2221 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2222 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2223 {
2224 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2225 return pVCpu->iem.s.abOpcode[offOpcode];
2226 }
2227# endif
2228 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2229}
2230
2231#endif /* IEM_WITH_SETJMP */
2232
2233/**
2234 * Fetches the next opcode byte, returns automatically on failure.
2235 *
2236 * @param a_pu8 Where to return the opcode byte.
2237 * @remark Implicitly references pVCpu.
2238 */
2239#ifndef IEM_WITH_SETJMP
2240# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2241 do \
2242 { \
2243 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2244 if (rcStrict2 == VINF_SUCCESS) \
2245 { /* likely */ } \
2246 else \
2247 return rcStrict2; \
2248 } while (0)
2249#else
2250# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2251#endif /* IEM_WITH_SETJMP */
2252
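/* Note! A minimal usage sketch for the macro above (the helper name is purely
 * illustrative and not part of this file): the macro hides the error plumbing,
 * so the same decoder source compiles both with strict status code returns and
 * with the setjmp/longjmp based build.
 *
 * @code
 *      IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImm8(PVMCPU pVCpu, uint8_t *pbImm)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);  // returns or longjmps on fetch failure
 *          *pbImm = bImm;
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */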
2253
2254#ifndef IEM_WITH_SETJMP
2255/**
2256 * Fetches the next signed byte from the opcode stream.
2257 *
2258 * @returns Strict VBox status code.
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param pi8 Where to return the signed byte.
2261 */
2262DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2263{
2264 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2265}
2266#endif /* !IEM_WITH_SETJMP */
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, returning automatically
2271 * on failure.
2272 *
2273 * @param a_pi8 Where to return the signed byte.
2274 * @remark Implicitly references pVCpu.
2275 */
2276#ifndef IEM_WITH_SETJMP
2277# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2278 do \
2279 { \
2280 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2281 if (rcStrict2 != VINF_SUCCESS) \
2282 return rcStrict2; \
2283 } while (0)
2284#else /* IEM_WITH_SETJMP */
2285# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2286
2287#endif /* IEM_WITH_SETJMP */
2288
2289#ifndef IEM_WITH_SETJMP
2290
2291/**
2292 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2293 *
2294 * @returns Strict VBox status code.
2295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2296 * @param   pu16                Where to return the opcode word.
2297 */
2298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2299{
2300 uint8_t u8;
2301 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2302 if (rcStrict == VINF_SUCCESS)
2303 *pu16 = (int8_t)u8;
2304 return rcStrict;
2305}
2306
2307
2308/**
2309 * Fetches the next signed byte from the opcode stream, extending it to
2310 * unsigned 16-bit.
2311 *
2312 * @returns Strict VBox status code.
2313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2314 * @param pu16 Where to return the unsigned word.
2315 */
2316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2317{
2318 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2319 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2320 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2321
2322 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2323 pVCpu->iem.s.offOpcode = offOpcode + 1;
2324 return VINF_SUCCESS;
2325}
2326
2327#endif /* !IEM_WITH_SETJMP */
2328
2329/**
2330 * Fetches the next signed byte from the opcode stream, sign-extending it to
2331 * a word and returning automatically on failure.
2332 *
2333 * @param a_pu16 Where to return the word.
2334 * @remark Implicitly references pVCpu.
2335 */
2336#ifndef IEM_WITH_SETJMP
2337# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2338 do \
2339 { \
2340 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2341 if (rcStrict2 != VINF_SUCCESS) \
2342 return rcStrict2; \
2343 } while (0)
2344#else
2345# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2346#endif
2347
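/* Note! The (int8_t) cast in the fetchers above is what performs the sign
 * extension; e.g. for an 8-bit displacement byte of 0xfe (-2):
 *
 * @code
 *      uint16_t u16Disp = (uint16_t)(int8_t)0xfe;  // 0xfffe, so adding it to a 16-bit
 *                                                  // effective address moves it back by two
 * @endcode
 */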
2348#ifndef IEM_WITH_SETJMP
2349
2350/**
2351 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2352 *
2353 * @returns Strict VBox status code.
2354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2355 * @param pu32 Where to return the opcode dword.
2356 */
2357DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2358{
2359 uint8_t u8;
2360 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2361 if (rcStrict == VINF_SUCCESS)
2362 *pu32 = (int8_t)u8;
2363 return rcStrict;
2364}
2365
2366
2367/**
2368 * Fetches the next signed byte from the opcode stream, extending it to
2369 * unsigned 32-bit.
2370 *
2371 * @returns Strict VBox status code.
2372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2373 * @param pu32 Where to return the unsigned dword.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2376{
2377 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2379 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2380
2381 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2382 pVCpu->iem.s.offOpcode = offOpcode + 1;
2383 return VINF_SUCCESS;
2384}
2385
2386#endif /* !IEM_WITH_SETJMP */
2387
2388/**
2389 * Fetches the next signed byte from the opcode stream, sign-extending it to
2390 * a double word and returning automatically on failure.
2391 *
2392 * @param   a_pu32              Where to return the double word.
2393 * @remark Implicitly references pVCpu.
2394 */
2395#ifndef IEM_WITH_SETJMP
2396#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2397 do \
2398 { \
2399 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2400 if (rcStrict2 != VINF_SUCCESS) \
2401 return rcStrict2; \
2402 } while (0)
2403#else
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2405#endif
2406
2407#ifndef IEM_WITH_SETJMP
2408
2409/**
2410 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2411 *
2412 * @returns Strict VBox status code.
2413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2414 * @param pu64 Where to return the opcode qword.
2415 */
2416DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2417{
2418 uint8_t u8;
2419 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2420 if (rcStrict == VINF_SUCCESS)
2421 *pu64 = (int8_t)u8;
2422 return rcStrict;
2423}
2424
2425
2426/**
2427 * Fetches the next signed byte from the opcode stream, extending it to
2428 * unsigned 64-bit.
2429 *
2430 * @returns Strict VBox status code.
2431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2432 * @param pu64 Where to return the unsigned qword.
2433 */
2434DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2435{
2436 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2437 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2438 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2439
2440 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2441 pVCpu->iem.s.offOpcode = offOpcode + 1;
2442 return VINF_SUCCESS;
2443}
2444
2445#endif /* !IEM_WITH_SETJMP */
2446
2447
2448/**
2449 * Fetches the next signed byte from the opcode stream, sign-extending it to
2450 * a quad word and returning automatically on failure.
2451 *
2452 * @param   a_pu64              Where to return the quad word.
2453 * @remark Implicitly references pVCpu.
2454 */
2455#ifndef IEM_WITH_SETJMP
2456# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2457 do \
2458 { \
2459 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2460 if (rcStrict2 != VINF_SUCCESS) \
2461 return rcStrict2; \
2462 } while (0)
2463#else
2464# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2465#endif
2466
2467
2468#ifndef IEM_WITH_SETJMP
2469/**
2470 * Fetches the next opcode byte.
2471 *
2472 * @returns Strict VBox status code.
2473 * @param pVCpu The cross context virtual CPU structure of the
2474 * calling thread.
2475 * @param pu8 Where to return the opcode byte.
2476 */
2477DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2478{
2479 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2480 pVCpu->iem.s.offModRm = offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2484 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2485 return VINF_SUCCESS;
2486 }
2487 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2488}
2489#else /* IEM_WITH_SETJMP */
2490/**
2491 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset; longjmp on error.
2492 *
2493 * @returns The opcode byte.
2494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2495 */
2496DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2497{
2498# ifdef IEM_WITH_CODE_TLB
2499 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2500 pVCpu->iem.s.offModRm = offBuf;
2501 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2502 if (RT_LIKELY( pbBuf != NULL
2503 && offBuf < pVCpu->iem.s.cbInstrBuf))
2504 {
2505 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2506 return pbBuf[offBuf];
2507 }
2508# else
2509 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2510 pVCpu->iem.s.offModRm = offOpcode;
2511 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2512 {
2513 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2514 return pVCpu->iem.s.abOpcode[offOpcode];
2515 }
2516# endif
2517 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2518}
2519#endif /* IEM_WITH_SETJMP */
2520
2521/**
2522 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2523 * on failure.
2524 *
2525 * Will note down the position of the ModR/M byte for VT-x exits.
2526 *
2527 * @param a_pbRm Where to return the RM opcode byte.
2528 * @remark Implicitly references pVCpu.
2529 */
2530#ifndef IEM_WITH_SETJMP
2531# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2532 do \
2533 { \
2534 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2535 if (rcStrict2 == VINF_SUCCESS) \
2536 { /* likely */ } \
2537 else \
2538 return rcStrict2; \
2539 } while (0)
2540#else
2541# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2542#endif /* IEM_WITH_SETJMP */
2543
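/* Note! For reference, the standard x86 ModR/M field layout that the decoders
 * pick apart after fetching the byte through the macro above (bRm is just an
 * illustrative local here):
 *
 * @code
 *      uint8_t const iMod = bRm >> 6;          // 00/01/10: memory forms, 11: register form
 *      uint8_t const iReg = (bRm >> 3) & 7;    // register operand or opcode extension
 *      uint8_t const iRm  = bRm & 7;           // register or memory base encoding
 * @endcode
 */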
2544
2545#ifndef IEM_WITH_SETJMP
2546
2547/**
2548 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2549 *
2550 * @returns Strict VBox status code.
2551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2552 * @param pu16 Where to return the opcode word.
2553 */
2554DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2555{
2556 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2557 if (rcStrict == VINF_SUCCESS)
2558 {
2559 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2561 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2562# else
2563 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2564# endif
2565 pVCpu->iem.s.offOpcode = offOpcode + 2;
2566 }
2567 else
2568 *pu16 = 0;
2569 return rcStrict;
2570}
2571
2572
2573/**
2574 * Fetches the next opcode word.
2575 *
2576 * @returns Strict VBox status code.
2577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2578 * @param pu16 Where to return the opcode word.
2579 */
2580DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2581{
2582 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2583 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2584 {
2585 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2586# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2587 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2588# else
2589 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590# endif
2591 return VINF_SUCCESS;
2592 }
2593 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2594}
2595
2596#else /* IEM_WITH_SETJMP */
2597
2598/**
2599 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2600 *
2601 * @returns The opcode word.
2602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2603 */
2604DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2605{
2606# ifdef IEM_WITH_CODE_TLB
2607 uint16_t u16;
2608 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2609 return u16;
2610# else
2611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2612 if (rcStrict == VINF_SUCCESS)
2613 {
2614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2615 pVCpu->iem.s.offOpcode += 2;
2616# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2617 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2618# else
2619 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620# endif
2621 }
2622 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2623# endif
2624}
2625
2626
2627/**
2628 * Fetches the next opcode word, longjmp on error.
2629 *
2630 * @returns The opcode word.
2631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2632 */
2633DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2634{
2635# ifdef IEM_WITH_CODE_TLB
2636 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2637 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2638 if (RT_LIKELY( pbBuf != NULL
2639 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2640 {
2641 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2643 return *(uint16_t const *)&pbBuf[offBuf];
2644# else
2645 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2646# endif
2647 }
2648# else
2649 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2650 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2651 {
2652 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2653# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2654 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2655# else
2656 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657# endif
2658 }
2659# endif
2660 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2661}
2662
2663#endif /* IEM_WITH_SETJMP */
2664
2665
2666/**
2667 * Fetches the next opcode word, returns automatically on failure.
2668 *
2669 * @param a_pu16 Where to return the opcode word.
2670 * @remark Implicitly references pVCpu.
2671 */
2672#ifndef IEM_WITH_SETJMP
2673# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2674 do \
2675 { \
2676 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2677 if (rcStrict2 != VINF_SUCCESS) \
2678 return rcStrict2; \
2679 } while (0)
2680#else
2681# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2682#endif
2683
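/* Note! On the little-endian hosts IEM targets, the two fetch variants above
 * produce the same value; the byte-wise RT_MAKE_U16 form merely avoids an
 * unaligned 16-bit load when IEM_USE_UNALIGNED_DATA_ACCESS is not defined.
 * For example, for the opcode bytes 0x34 0x12:
 *
 * @code
 *      uint16_t u16 = RT_MAKE_U16(0x34, 0x12);     // 0x1234 - low byte first
 * @endcode
 */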
2684#ifndef IEM_WITH_SETJMP
2685
2686/**
2687 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2688 *
2689 * @returns Strict VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2691 * @param pu32 Where to return the opcode double word.
2692 */
2693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2694{
2695 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2696 if (rcStrict == VINF_SUCCESS)
2697 {
2698 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2699 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2700 pVCpu->iem.s.offOpcode = offOpcode + 2;
2701 }
2702 else
2703 *pu32 = 0;
2704 return rcStrict;
2705}
2706
2707
2708/**
2709 * Fetches the next opcode word, zero extending it to a double word.
2710 *
2711 * @returns Strict VBox status code.
2712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2713 * @param pu32 Where to return the opcode double word.
2714 */
2715DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2716{
2717 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2718 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2719 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2720
2721 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2722 pVCpu->iem.s.offOpcode = offOpcode + 2;
2723 return VINF_SUCCESS;
2724}
2725
2726#endif /* !IEM_WITH_SETJMP */
2727
2728
2729/**
2730 * Fetches the next opcode word and zero extends it to a double word, returns
2731 * automatically on failure.
2732 *
2733 * @param a_pu32 Where to return the opcode double word.
2734 * @remark Implicitly references pVCpu.
2735 */
2736#ifndef IEM_WITH_SETJMP
2737# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2738 do \
2739 { \
2740 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2741 if (rcStrict2 != VINF_SUCCESS) \
2742 return rcStrict2; \
2743 } while (0)
2744#else
2745# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2746#endif
2747
2748#ifndef IEM_WITH_SETJMP
2749
2750/**
2751 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2752 *
2753 * @returns Strict VBox status code.
2754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2755 * @param pu64 Where to return the opcode quad word.
2756 */
2757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2758{
2759 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2760 if (rcStrict == VINF_SUCCESS)
2761 {
2762 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2763 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2764 pVCpu->iem.s.offOpcode = offOpcode + 2;
2765 }
2766 else
2767 *pu64 = 0;
2768 return rcStrict;
2769}
2770
2771
2772/**
2773 * Fetches the next opcode word, zero extending it to a quad word.
2774 *
2775 * @returns Strict VBox status code.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 * @param pu64 Where to return the opcode quad word.
2778 */
2779DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2780{
2781 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2782 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2783 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2784
2785 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2786 pVCpu->iem.s.offOpcode = offOpcode + 2;
2787 return VINF_SUCCESS;
2788}
2789
2790#endif /* !IEM_WITH_SETJMP */
2791
2792/**
2793 * Fetches the next opcode word and zero extends it to a quad word, returns
2794 * automatically on failure.
2795 *
2796 * @param a_pu64 Where to return the opcode quad word.
2797 * @remark Implicitly references pVCpu.
2798 */
2799#ifndef IEM_WITH_SETJMP
2800# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2801 do \
2802 { \
2803 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2804 if (rcStrict2 != VINF_SUCCESS) \
2805 return rcStrict2; \
2806 } while (0)
2807#else
2808# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2809#endif
2810
2811
2812#ifndef IEM_WITH_SETJMP
2813/**
2814 * Fetches the next signed word from the opcode stream.
2815 *
2816 * @returns Strict VBox status code.
2817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2818 * @param pi16 Where to return the signed word.
2819 */
2820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2821{
2822 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2823}
2824#endif /* !IEM_WITH_SETJMP */
2825
2826
2827/**
2828 * Fetches the next signed word from the opcode stream, returning automatically
2829 * on failure.
2830 *
2831 * @param a_pi16 Where to return the signed word.
2832 * @remark Implicitly references pVCpu.
2833 */
2834#ifndef IEM_WITH_SETJMP
2835# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2836 do \
2837 { \
2838 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2839 if (rcStrict2 != VINF_SUCCESS) \
2840 return rcStrict2; \
2841 } while (0)
2842#else
2843# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2844#endif
2845
2846#ifndef IEM_WITH_SETJMP
2847
2848/**
2849 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2850 *
2851 * @returns Strict VBox status code.
2852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2853 * @param pu32 Where to return the opcode dword.
2854 */
2855DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2856{
2857 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2858 if (rcStrict == VINF_SUCCESS)
2859 {
2860 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2861# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2862 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2863# else
2864 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2865 pVCpu->iem.s.abOpcode[offOpcode + 1],
2866 pVCpu->iem.s.abOpcode[offOpcode + 2],
2867 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2868# endif
2869 pVCpu->iem.s.offOpcode = offOpcode + 4;
2870 }
2871 else
2872 *pu32 = 0;
2873 return rcStrict;
2874}
2875
2876
2877/**
2878 * Fetches the next opcode dword.
2879 *
2880 * @returns Strict VBox status code.
2881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2882 * @param pu32 Where to return the opcode double word.
2883 */
2884DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2885{
2886 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2887 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2888 {
2889 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2890# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2891 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2892# else
2893 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2894 pVCpu->iem.s.abOpcode[offOpcode + 1],
2895 pVCpu->iem.s.abOpcode[offOpcode + 2],
2896 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2897# endif
2898 return VINF_SUCCESS;
2899 }
2900 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2901}
2902
2903#else  /* IEM_WITH_SETJMP */
2904
2905/**
2906 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2907 *
2908 * @returns The opcode dword.
2909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2910 */
2911DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2912{
2913# ifdef IEM_WITH_CODE_TLB
2914 uint32_t u32;
2915 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2916 return u32;
2917# else
2918 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2919 if (rcStrict == VINF_SUCCESS)
2920 {
2921 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2922 pVCpu->iem.s.offOpcode = offOpcode + 4;
2923# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2924 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2925# else
2926 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2927 pVCpu->iem.s.abOpcode[offOpcode + 1],
2928 pVCpu->iem.s.abOpcode[offOpcode + 2],
2929 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2930# endif
2931 }
2932 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2933# endif
2934}
2935
2936
2937/**
2938 * Fetches the next opcode dword, longjmp on error.
2939 *
2940 * @returns The opcode dword.
2941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2942 */
2943DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2944{
2945# ifdef IEM_WITH_CODE_TLB
2946 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2947 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2948 if (RT_LIKELY( pbBuf != NULL
2949 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2950 {
2951 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2952# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2953 return *(uint32_t const *)&pbBuf[offBuf];
2954# else
2955 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2956 pbBuf[offBuf + 1],
2957 pbBuf[offBuf + 2],
2958 pbBuf[offBuf + 3]);
2959# endif
2960 }
2961# else
2962 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2963 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2964 {
2965 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2966# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2967 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2968# else
2969 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2970 pVCpu->iem.s.abOpcode[offOpcode + 1],
2971 pVCpu->iem.s.abOpcode[offOpcode + 2],
2972 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2973# endif
2974 }
2975# endif
2976 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2977}
2978
2979#endif /* IEM_WITH_SETJMP */
2980
2981
2982/**
2983 * Fetches the next opcode dword, returns automatically on failure.
2984 *
2985 * @param a_pu32 Where to return the opcode dword.
2986 * @remark Implicitly references pVCpu.
2987 */
2988#ifndef IEM_WITH_SETJMP
2989# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2990 do \
2991 { \
2992 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2993 if (rcStrict2 != VINF_SUCCESS) \
2994 return rcStrict2; \
2995 } while (0)
2996#else
2997# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2998#endif
2999
3000#ifndef IEM_WITH_SETJMP
3001
3002/**
3003 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param   pu64                Where to return the opcode qword.
3008 */
3009DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3012 if (rcStrict == VINF_SUCCESS)
3013 {
3014 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3015 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 pVCpu->iem.s.offOpcode = offOpcode + 4;
3020 }
3021 else
3022 *pu64 = 0;
3023 return rcStrict;
3024}
3025
3026
3027/**
3028 * Fetches the next opcode dword, zero extending it to a quad word.
3029 *
3030 * @returns Strict VBox status code.
3031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3032 * @param pu64 Where to return the opcode quad word.
3033 */
3034DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3035{
3036 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3037 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3038 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3039
3040 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3041 pVCpu->iem.s.abOpcode[offOpcode + 1],
3042 pVCpu->iem.s.abOpcode[offOpcode + 2],
3043 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3044 pVCpu->iem.s.offOpcode = offOpcode + 4;
3045 return VINF_SUCCESS;
3046}
3047
3048#endif /* !IEM_WITH_SETJMP */
3049
3050
3051/**
3052 * Fetches the next opcode dword and zero extends it to a quad word, returns
3053 * automatically on failure.
3054 *
3055 * @param a_pu64 Where to return the opcode quad word.
3056 * @remark Implicitly references pVCpu.
3057 */
3058#ifndef IEM_WITH_SETJMP
3059# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3060 do \
3061 { \
3062 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3063 if (rcStrict2 != VINF_SUCCESS) \
3064 return rcStrict2; \
3065 } while (0)
3066#else
3067# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3068#endif
3069
3070
3071#ifndef IEM_WITH_SETJMP
3072/**
3073 * Fetches the next signed double word from the opcode stream.
3074 *
3075 * @returns Strict VBox status code.
3076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3077 * @param pi32 Where to return the signed double word.
3078 */
3079DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3080{
3081 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3082}
3083#endif
3084
3085/**
3086 * Fetches the next signed double word from the opcode stream, returning
3087 * automatically on failure.
3088 *
3089 * @param a_pi32 Where to return the signed double word.
3090 * @remark Implicitly references pVCpu.
3091 */
3092#ifndef IEM_WITH_SETJMP
3093# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3094 do \
3095 { \
3096 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3097 if (rcStrict2 != VINF_SUCCESS) \
3098 return rcStrict2; \
3099 } while (0)
3100#else
3101# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3102#endif
3103
3104#ifndef IEM_WITH_SETJMP
3105
3106/**
3107 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3108 *
3109 * @returns Strict VBox status code.
3110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3111 * @param pu64 Where to return the opcode qword.
3112 */
3113DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3114{
3115 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3116 if (rcStrict == VINF_SUCCESS)
3117 {
3118 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3119 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3120 pVCpu->iem.s.abOpcode[offOpcode + 1],
3121 pVCpu->iem.s.abOpcode[offOpcode + 2],
3122 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3123 pVCpu->iem.s.offOpcode = offOpcode + 4;
3124 }
3125 else
3126 *pu64 = 0;
3127 return rcStrict;
3128}
3129
3130
3131/**
3132 * Fetches the next opcode dword, sign extending it into a quad word.
3133 *
3134 * @returns Strict VBox status code.
3135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3136 * @param pu64 Where to return the opcode quad word.
3137 */
3138DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3139{
3140 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3141 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3142 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3143
3144 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3145 pVCpu->iem.s.abOpcode[offOpcode + 1],
3146 pVCpu->iem.s.abOpcode[offOpcode + 2],
3147 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3148 *pu64 = i32;
3149 pVCpu->iem.s.offOpcode = offOpcode + 4;
3150 return VINF_SUCCESS;
3151}
3152
3153#endif /* !IEM_WITH_SETJMP */
3154
3155
3156/**
3157 * Fetches the next opcode double word and sign extends it to a quad word,
3158 * returns automatically on failure.
3159 *
3160 * @param a_pu64 Where to return the opcode quad word.
3161 * @remark Implicitly references pVCpu.
3162 */
3163#ifndef IEM_WITH_SETJMP
3164# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3165 do \
3166 { \
3167 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3168 if (rcStrict2 != VINF_SUCCESS) \
3169 return rcStrict2; \
3170 } while (0)
3171#else
3172# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3173#endif
3174
3175#ifndef IEM_WITH_SETJMP
3176
3177/**
3178 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3179 *
3180 * @returns Strict VBox status code.
3181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3182 * @param pu64 Where to return the opcode qword.
3183 */
3184DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3185{
3186 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3187 if (rcStrict == VINF_SUCCESS)
3188 {
3189 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3190# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3191 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3192# else
3193 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3194 pVCpu->iem.s.abOpcode[offOpcode + 1],
3195 pVCpu->iem.s.abOpcode[offOpcode + 2],
3196 pVCpu->iem.s.abOpcode[offOpcode + 3],
3197 pVCpu->iem.s.abOpcode[offOpcode + 4],
3198 pVCpu->iem.s.abOpcode[offOpcode + 5],
3199 pVCpu->iem.s.abOpcode[offOpcode + 6],
3200 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3201# endif
3202 pVCpu->iem.s.offOpcode = offOpcode + 8;
3203 }
3204 else
3205 *pu64 = 0;
3206 return rcStrict;
3207}
3208
3209
3210/**
3211 * Fetches the next opcode qword.
3212 *
3213 * @returns Strict VBox status code.
3214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3215 * @param pu64 Where to return the opcode qword.
3216 */
3217DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3218{
3219 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3220 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3221 {
3222# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3223 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3224# else
3225 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3226 pVCpu->iem.s.abOpcode[offOpcode + 1],
3227 pVCpu->iem.s.abOpcode[offOpcode + 2],
3228 pVCpu->iem.s.abOpcode[offOpcode + 3],
3229 pVCpu->iem.s.abOpcode[offOpcode + 4],
3230 pVCpu->iem.s.abOpcode[offOpcode + 5],
3231 pVCpu->iem.s.abOpcode[offOpcode + 6],
3232 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3233# endif
3234 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3235 return VINF_SUCCESS;
3236 }
3237 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3238}
3239
3240#else /* IEM_WITH_SETJMP */
3241
3242/**
3243 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3244 *
3245 * @returns The opcode qword.
3246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3247 */
3248DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3249{
3250# ifdef IEM_WITH_CODE_TLB
3251 uint64_t u64;
3252 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3253 return u64;
3254# else
3255 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3256 if (rcStrict == VINF_SUCCESS)
3257 {
3258 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3259 pVCpu->iem.s.offOpcode = offOpcode + 8;
3260# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3261 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3262# else
3263 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3264 pVCpu->iem.s.abOpcode[offOpcode + 1],
3265 pVCpu->iem.s.abOpcode[offOpcode + 2],
3266 pVCpu->iem.s.abOpcode[offOpcode + 3],
3267 pVCpu->iem.s.abOpcode[offOpcode + 4],
3268 pVCpu->iem.s.abOpcode[offOpcode + 5],
3269 pVCpu->iem.s.abOpcode[offOpcode + 6],
3270 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3271# endif
3272 }
3273 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3274# endif
3275}
3276
3277
3278/**
3279 * Fetches the next opcode qword, longjmp on error.
3280 *
3281 * @returns The opcode qword.
3282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3283 */
3284DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3285{
3286# ifdef IEM_WITH_CODE_TLB
3287 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3288 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3289 if (RT_LIKELY( pbBuf != NULL
3290 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3291 {
3292 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3293# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3294 return *(uint64_t const *)&pbBuf[offBuf];
3295# else
3296 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3297 pbBuf[offBuf + 1],
3298 pbBuf[offBuf + 2],
3299 pbBuf[offBuf + 3],
3300 pbBuf[offBuf + 4],
3301 pbBuf[offBuf + 5],
3302 pbBuf[offBuf + 6],
3303 pbBuf[offBuf + 7]);
3304# endif
3305 }
3306# else
3307 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3308 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3309 {
3310 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3311# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3312 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3313# else
3314 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3315 pVCpu->iem.s.abOpcode[offOpcode + 1],
3316 pVCpu->iem.s.abOpcode[offOpcode + 2],
3317 pVCpu->iem.s.abOpcode[offOpcode + 3],
3318 pVCpu->iem.s.abOpcode[offOpcode + 4],
3319 pVCpu->iem.s.abOpcode[offOpcode + 5],
3320 pVCpu->iem.s.abOpcode[offOpcode + 6],
3321 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3322# endif
3323 }
3324# endif
3325 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3326}
3327
3328#endif /* IEM_WITH_SETJMP */
3329
3330/**
3331 * Fetches the next opcode quad word, returns automatically on failure.
3332 *
3333 * @param a_pu64 Where to return the opcode quad word.
3334 * @remark Implicitly references pVCpu.
3335 */
3336#ifndef IEM_WITH_SETJMP
3337# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3338 do \
3339 { \
3340 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3341 if (rcStrict2 != VINF_SUCCESS) \
3342 return rcStrict2; \
3343 } while (0)
3344#else
3345# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3346#endif
3347
3348
3349/** @name Misc Worker Functions.
3350 * @{
3351 */
3352
3353/**
3354 * Gets the exception class for the specified exception vector.
3355 *
3356 * @returns The class of the specified exception.
3357 * @param uVector The exception vector.
3358 */
3359IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3360{
3361 Assert(uVector <= X86_XCPT_LAST);
3362 switch (uVector)
3363 {
3364 case X86_XCPT_DE:
3365 case X86_XCPT_TS:
3366 case X86_XCPT_NP:
3367 case X86_XCPT_SS:
3368 case X86_XCPT_GP:
3369 case X86_XCPT_SX: /* AMD only */
3370 return IEMXCPTCLASS_CONTRIBUTORY;
3371
3372 case X86_XCPT_PF:
3373 case X86_XCPT_VE: /* Intel only */
3374 return IEMXCPTCLASS_PAGE_FAULT;
3375
3376 case X86_XCPT_DF:
3377 return IEMXCPTCLASS_DOUBLE_FAULT;
3378 }
3379 return IEMXCPTCLASS_BENIGN;
3380}
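
/*
 * A few illustrative classifications following the switch above (hedged sketch,
 * not an exhaustive list):
 *
 * @code
 *      Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
 *      Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
 *      Assert(iemGetXcptClass(X86_XCPT_DF) == IEMXCPTCLASS_DOUBLE_FAULT);
 *      Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);  // falls through to the default case
 * @endcode
 */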
3381
3382
3383/**
3384 * Evaluates how to handle an exception caused during delivery of another event
3385 * (exception / interrupt).
3386 *
3387 * @returns How to handle the recursive exception.
3388 * @param pVCpu The cross context virtual CPU structure of the
3389 * calling thread.
3390 * @param fPrevFlags The flags of the previous event.
3391 * @param uPrevVector The vector of the previous event.
3392 * @param fCurFlags The flags of the current exception.
3393 * @param uCurVector The vector of the current exception.
3394 * @param pfXcptRaiseInfo Where to store additional information about the
3395 * exception condition. Optional.
3396 */
3397VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3398 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3399{
3400 /*
3401 * Only CPU exceptions can be raised while delivering other events; software interrupt
3402 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3403 */
3404 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3405 Assert(pVCpu); RT_NOREF(pVCpu);
3406 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3407
3408 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3409 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3410 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3411 {
3412 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3413 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3414 {
3415 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3416 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3417 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3418 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3419 {
3420 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3421 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3422 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3423 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3424 uCurVector, pVCpu->cpum.GstCtx.cr2));
3425 }
3426 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3427 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3428 {
3429 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3430 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3431 }
3432 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3433 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3434 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3435 {
3436 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3437 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3438 }
3439 }
3440 else
3441 {
3442 if (uPrevVector == X86_XCPT_NMI)
3443 {
3444 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3445 if (uCurVector == X86_XCPT_PF)
3446 {
3447 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3448 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3449 }
3450 }
3451 else if ( uPrevVector == X86_XCPT_AC
3452 && uCurVector == X86_XCPT_AC)
3453 {
3454 enmRaise = IEMXCPTRAISE_CPU_HANG;
3455 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3456 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3457 }
3458 }
3459 }
3460 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3461 {
3462 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3463 if (uCurVector == X86_XCPT_PF)
3464 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3465 }
3466 else
3467 {
3468 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3469 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3470 }
3471
3472 if (pfXcptRaiseInfo)
3473 *pfXcptRaiseInfo = fRaiseInfo;
3474 return enmRaise;
3475}
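
/*
 * Illustrative call (hedged sketch, assumes pVCpu is in scope): a #GP raised
 * while delivering a #PF is resolved to a double fault by the rules above,
 * with the page-fault/contributory info flag set.
 *
 * @code
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *      Assert(fRaiseInfo & IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 * @endcode
 */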
3476
3477
3478/**
3479 * Enters the CPU shutdown state initiated by a triple fault or other
3480 * unrecoverable conditions.
3481 *
3482 * @returns Strict VBox status code.
3483 * @param pVCpu The cross context virtual CPU structure of the
3484 * calling thread.
3485 */
3486IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3487{
3488 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3489 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3490
3491 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3492 {
3493 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3494 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3495 }
3496
3497 RT_NOREF(pVCpu);
3498 return VINF_EM_TRIPLE_FAULT;
3499}
3500
3501
3502/**
3503 * Validates a new SS segment.
3504 *
3505 * @returns VBox strict status code.
3506 * @param pVCpu The cross context virtual CPU structure of the
3507 * calling thread.
3508 * @param NewSS The new SS selector.
3509 * @param uCpl The CPL to load the stack for.
3510 * @param pDesc Where to return the descriptor.
3511 */
3512IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3513{
3514 /* Null selectors are not allowed (we're not called for dispatching
3515 interrupts with SS=0 in long mode). */
3516 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3517 {
3518 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3519 return iemRaiseTaskSwitchFault0(pVCpu);
3520 }
3521
3522 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3523 if ((NewSS & X86_SEL_RPL) != uCpl)
3524 {
3525 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3526 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3527 }
3528
3529 /*
3530 * Read the descriptor.
3531 */
3532 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3533 if (rcStrict != VINF_SUCCESS)
3534 return rcStrict;
3535
3536 /*
3537 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3538 */
3539 if (!pDesc->Legacy.Gen.u1DescType)
3540 {
3541 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3542 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3543 }
3544
3545 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3546 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3547 {
3548 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3549 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3550 }
3551 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3552 {
3553 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3554 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3555 }
3556
3557 /* Is it there? */
3558 /** @todo testcase: Is this checked before the canonical / limit check below? */
3559 if (!pDesc->Legacy.Gen.u1Present)
3560 {
3561 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3562 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3563 }
3564
3565 return VINF_SUCCESS;
3566}
3567
3568
3569/**
3570 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3571 * not.
3572 *
3573 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3574 */
3575#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3576# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3577#else
3578# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3579#endif
3580
3581/**
3582 * Updates the EFLAGS in the correct manner wrt. PATM.
3583 *
3584 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param a_fEfl The new EFLAGS.
3586 */
3587#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3588# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3589#else
3590# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3591#endif
3592
3593
3594/** @} */
3595
3596/** @name Raising Exceptions.
3597 *
3598 * @{
3599 */
3600
3601
3602/**
3603 * Loads the specified stack far pointer from the TSS.
3604 *
3605 * @returns VBox strict status code.
3606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3607 * @param uCpl The CPL to load the stack for.
3608 * @param pSelSS Where to return the new stack segment.
3609 * @param puEsp Where to return the new stack pointer.
3610 */
3611IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3612{
3613 VBOXSTRICTRC rcStrict;
3614 Assert(uCpl < 4);
3615
3616 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3617 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3618 {
3619 /*
3620 * 16-bit TSS (X86TSS16).
3621 */
3622 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3623 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3624 {
3625 uint32_t off = uCpl * 4 + 2;
3626 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3627 {
3628 /** @todo check actual access pattern here. */
3629 uint32_t u32Tmp = 0; /* gcc maybe... */
3630 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3631 if (rcStrict == VINF_SUCCESS)
3632 {
3633 *puEsp = RT_LOWORD(u32Tmp);
3634 *pSelSS = RT_HIWORD(u32Tmp);
3635 return VINF_SUCCESS;
3636 }
3637 }
3638 else
3639 {
3640 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3641 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3642 }
3643 break;
3644 }
3645
3646 /*
3647 * 32-bit TSS (X86TSS32).
3648 */
3649 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3650 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3651 {
3652 uint32_t off = uCpl * 8 + 4;
3653 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3654 {
3655 /** @todo check actual access pattern here. */
3656 uint64_t u64Tmp;
3657 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3658 if (rcStrict == VINF_SUCCESS)
3659 {
3660 *puEsp = u64Tmp & UINT32_MAX;
3661 *pSelSS = (RTSEL)(u64Tmp >> 32);
3662 return VINF_SUCCESS;
3663 }
3664 }
3665 else
3666 {
3667 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3668 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3669 }
3670 break;
3671 }
3672
3673 default:
3674 AssertFailed();
3675 rcStrict = VERR_IEM_IPE_4;
3676 break;
3677 }
3678
3679 *puEsp = 0; /* make gcc happy */
3680 *pSelSS = 0; /* make gcc happy */
3681 return rcStrict;
3682}
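
/*
 * Offset arithmetic illustrated for the function above (hedged): the {SP, SS}
 * pair for ring n starts at 2 + n * 4 in a 16-bit TSS and the {ESP, SS} pair
 * at 4 + n * 8 in a 32-bit TSS.  E.g. uCpl=1 reads offset 6 (sp1/ss1) from a
 * 16-bit TSS and offset 12 (esp1/ss1) from a 32-bit one.
 */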
3683
3684
3685/**
3686 * Loads the specified stack pointer from the 64-bit TSS.
3687 *
3688 * @returns VBox strict status code.
3689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3690 * @param uCpl The CPL to load the stack for.
3691 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3692 * @param puRsp Where to return the new stack pointer.
3693 */
3694IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3695{
3696 Assert(uCpl < 4);
3697 Assert(uIst < 8);
3698 *puRsp = 0; /* make gcc happy */
3699
3700 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3701 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3702
3703 uint32_t off;
3704 if (uIst)
3705 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3706 else
3707 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3708 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3709 {
3710 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3711 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3712 }
3713
3714 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3715}
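
/*
 * Offset arithmetic illustrated for the function above (hedged): with uIst=0
 * the stack comes from the RSPn field of the ring being entered, e.g. uCpl=2
 * reads X86TSS64::rsp2 at offset 0x14; a non-zero uIst takes precedence, e.g.
 * uIst=3 reads X86TSS64::ist3 at offset 0x34.
 */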
3716
3717
3718/**
3719 * Adjust the CPU state according to the exception being raised.
3720 *
3721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3722 * @param u8Vector The exception that has been raised.
3723 */
3724DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3725{
3726 switch (u8Vector)
3727 {
3728 case X86_XCPT_DB:
3729 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3730 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3731 break;
3732 /** @todo Read the AMD and Intel exception reference... */
3733 }
3734}
3735
3736
3737/**
3738 * Implements exceptions and interrupts for real mode.
3739 *
3740 * @returns VBox strict status code.
3741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3742 * @param cbInstr The number of bytes to offset rIP by in the return
3743 * address.
3744 * @param u8Vector The interrupt / exception vector number.
3745 * @param fFlags The flags.
3746 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3747 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3748 */
3749IEM_STATIC VBOXSTRICTRC
3750iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3751 uint8_t cbInstr,
3752 uint8_t u8Vector,
3753 uint32_t fFlags,
3754 uint16_t uErr,
3755 uint64_t uCr2)
3756{
3757 NOREF(uErr); NOREF(uCr2);
3758 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3759
3760 /*
3761 * Read the IDT entry.
3762 */
3763 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3764 {
3765 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3767 }
3768 RTFAR16 Idte;
3769 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3770 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3771 {
3772 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3773 return rcStrict;
3774 }
3775
3776 /*
3777 * Push the stack frame.
3778 */
3779 uint16_t *pu16Frame;
3780 uint64_t uNewRsp;
3781 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3782 if (rcStrict != VINF_SUCCESS)
3783 return rcStrict;
3784
3785 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3786#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3787 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3788 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3789 fEfl |= UINT16_C(0xf000);
3790#endif
3791 pu16Frame[2] = (uint16_t)fEfl;
3792 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3793 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3794 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3795 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3796 return rcStrict;
3797
3798 /*
3799 * Load the vector address into cs:ip and make exception specific state
3800 * adjustments.
3801 */
3802 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3803 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3804 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3805 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3806 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3807 pVCpu->cpum.GstCtx.rip = Idte.off;
3808 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3809 IEMMISC_SET_EFL(pVCpu, fEfl);
3810
3811 /** @todo do we actually do this in real mode? */
3812 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3813 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3814
3815 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3816}
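
/*
 * Worked example for the function above (illustrative): an INT 21h in real
 * mode with IDTR.base=0 fetches the handler's far pointer from linear address
 * 0x21 * 4 = 0x84, pushes FLAGS, CS and the return IP (IP + cbInstr for
 * software interrupts) as three words, clears IF/TF/AC, and resumes execution
 * at Idte.sel:Idte.off.
 */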
3817
3818
3819/**
3820 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3821 *
3822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3823 * @param pSReg Pointer to the segment register.
3824 */
3825IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3826{
3827 pSReg->Sel = 0;
3828 pSReg->ValidSel = 0;
3829 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3830 {
3831 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3832 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3833 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3834 }
3835 else
3836 {
3837 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3838 /** @todo check this on AMD-V */
3839 pSReg->u64Base = 0;
3840 pSReg->u32Limit = 0;
3841 }
3842}
3843
3844
3845/**
3846 * Loads a segment selector during a task switch in V8086 mode.
3847 *
3848 * @param pSReg Pointer to the segment register.
3849 * @param uSel The selector value to load.
3850 */
3851IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3852{
3853 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3854 pSReg->Sel = uSel;
3855 pSReg->ValidSel = uSel;
3856 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3857 pSReg->u64Base = uSel << 4;
3858 pSReg->u32Limit = 0xffff;
3859 pSReg->Attr.u = 0xf3;
3860}
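
/*
 * Worked example for the function above (illustrative): loading selector
 * 0x1234 in V8086 mode yields base 0x12340 (selector << 4), limit 0xffff and
 * attributes 0xf3, i.e. a present, DPL=3, accessed read/write data segment.
 */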
3861
3862
3863/**
3864 * Loads a NULL data selector into a selector register, both the hidden and
3865 * visible parts, in protected mode.
3866 *
3867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3868 * @param pSReg Pointer to the segment register.
3869 * @param uRpl The RPL.
3870 */
3871IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3872{
3873 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3874 * data selector in protected mode. */
3875 pSReg->Sel = uRpl;
3876 pSReg->ValidSel = uRpl;
3877 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3878 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3879 {
3880 /* VT-x (Intel 3960x) observed doing something like this. */
3881 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3882 pSReg->u32Limit = UINT32_MAX;
3883 pSReg->u64Base = 0;
3884 }
3885 else
3886 {
3887 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3888 pSReg->u32Limit = 0;
3889 pSReg->u64Base = 0;
3890 }
3891}
3892
3893
3894/**
3895 * Loads a segment selector during a task switch in protected mode.
3896 *
3897 * In this task switch scenario, we would throw \#TS exceptions rather than
3898 * \#GPs.
3899 *
3900 * @returns VBox strict status code.
3901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3902 * @param pSReg Pointer to the segment register.
3903 * @param uSel The new selector value.
3904 *
3905 * @remarks This does _not_ handle CS or SS.
3906 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3907 */
3908IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3909{
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911
3912 /* Null data selector. */
3913 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3914 {
3915 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3917 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3918 return VINF_SUCCESS;
3919 }
3920
3921 /* Fetch the descriptor. */
3922 IEMSELDESC Desc;
3923 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3924 if (rcStrict != VINF_SUCCESS)
3925 {
3926 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3927 VBOXSTRICTRC_VAL(rcStrict)));
3928 return rcStrict;
3929 }
3930
3931 /* Must be a data segment or readable code segment. */
3932 if ( !Desc.Legacy.Gen.u1DescType
3933 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3934 {
3935 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3936 Desc.Legacy.Gen.u4Type));
3937 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3938 }
3939
3940 /* Check privileges for data segments and non-conforming code segments. */
3941 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3942 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3943 {
3944 /* The RPL and the new CPL must be less than or equal to the DPL. */
3945 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3946 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3947 {
3948 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3949 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3950 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3951 }
3952 }
3953
3954 /* Is it there? */
3955 if (!Desc.Legacy.Gen.u1Present)
3956 {
3957 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3958 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3959 }
3960
3961 /* The base and limit. */
3962 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3963 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3964
3965 /*
3966 * Ok, everything checked out fine. Now set the accessed bit before
3967 * committing the result into the registers.
3968 */
3969 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3970 {
3971 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3972 if (rcStrict != VINF_SUCCESS)
3973 return rcStrict;
3974 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3975 }
3976
3977 /* Commit */
3978 pSReg->Sel = uSel;
3979 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3980 pSReg->u32Limit = cbLimit;
3981 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3982 pSReg->ValidSel = uSel;
3983 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3984 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3985 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3986
3987 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3988 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3989 return VINF_SUCCESS;
3990}
3991
3992
3993/**
3994 * Performs a task switch.
3995 *
3996 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3997 * caller is responsible for performing the necessary checks (like DPL, TSS
3998 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3999 * reference for JMP, CALL, IRET.
4000 *
4001 * If the task switch is due to a software interrupt or hardware exception,
4002 * the caller is responsible for validating the TSS selector and descriptor. See
4003 * Intel Instruction reference for INT n.
4004 *
4005 * @returns VBox strict status code.
4006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4007 * @param enmTaskSwitch The cause of the task switch.
4008 * @param uNextEip The EIP effective after the task switch.
4009 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4010 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4011 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4012 * @param SelTSS The TSS selector of the new task.
4013 * @param pNewDescTSS Pointer to the new TSS descriptor.
4014 */
4015IEM_STATIC VBOXSTRICTRC
4016iemTaskSwitch(PVMCPU pVCpu,
4017 IEMTASKSWITCH enmTaskSwitch,
4018 uint32_t uNextEip,
4019 uint32_t fFlags,
4020 uint16_t uErr,
4021 uint64_t uCr2,
4022 RTSEL SelTSS,
4023 PIEMSELDESC pNewDescTSS)
4024{
4025 Assert(!IEM_IS_REAL_MODE(pVCpu));
4026 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4027 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4028
4029 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4030 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4031 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4032 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4033 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4034
4035 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4036 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4037
4038 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4039 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4040
4041 /* Update CR2 in case it's a page-fault. */
4042 /** @todo This should probably be done much earlier in IEM/PGM. See
4043 * @bugref{5653#c49}. */
4044 if (fFlags & IEM_XCPT_FLAGS_CR2)
4045 pVCpu->cpum.GstCtx.cr2 = uCr2;
4046
4047 /*
4048 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4049 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4050 */
4051 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4052 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4053 if (uNewTSSLimit < uNewTSSLimitMin)
4054 {
4055 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4056 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4057 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4058 }
4059
4060 /*
4061 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4062 * The new TSS must have been read and validated (DPL, limits etc.) before a
4063 * task-switch VM-exit commences.
4064 *
4065 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4066 */
4067 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4068 {
4069 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4070 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4071 }
4072
4073 /*
4074 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4075 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4076 */
4077 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4078 {
4079 uint32_t const uExitInfo1 = SelTSS;
4080 uint32_t uExitInfo2 = uErr;
4081 switch (enmTaskSwitch)
4082 {
4083 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4084 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4085 default: break;
4086 }
4087 if (fFlags & IEM_XCPT_FLAGS_ERR)
4088 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4089 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4090 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4091
4092 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4093 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4094 RT_NOREF2(uExitInfo1, uExitInfo2);
4095 }
4096
4097 /*
4098 * Check the current TSS limit. The last write to the current TSS during the
4099 * task switch covers 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4100 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4101 *
4102 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4103 * end up with smaller than "legal" TSS limits.
4104 */
4105 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4106 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4107 if (uCurTSSLimit < uCurTSSLimitMin)
4108 {
4109 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4110 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4111 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4112 }
4113
4114 /*
4115 * Verify that the new TSS can be accessed and map it. Map only the required contents
4116 * and not the entire TSS.
4117 */
4118 void *pvNewTSS;
4119 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4120 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4121 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4122 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4123 * not perform correct translation if this happens. See Intel spec. 7.2.1
4124 * "Task-State Segment" */
4125 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4126 if (rcStrict != VINF_SUCCESS)
4127 {
4128 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4129 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4130 return rcStrict;
4131 }
4132
4133 /*
4134 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4135 */
4136 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4137 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4138 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4139 {
4140 PX86DESC pDescCurTSS;
4141 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4142 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4143 if (rcStrict != VINF_SUCCESS)
4144 {
4145 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4146 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4147 return rcStrict;
4148 }
4149
4150 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4151 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4152 if (rcStrict != VINF_SUCCESS)
4153 {
4154 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4155 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4156 return rcStrict;
4157 }
4158
4159 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4160 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4161 {
4162 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4163 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4164 u32EFlags &= ~X86_EFL_NT;
4165 }
4166 }
4167
4168 /*
4169 * Save the CPU state into the current TSS.
4170 */
4171 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4172 if (GCPtrNewTSS == GCPtrCurTSS)
4173 {
4174 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4175 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4176 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4177 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4178 pVCpu->cpum.GstCtx.ldtr.Sel));
4179 }
4180 if (fIsNewTSS386)
4181 {
4182 /*
4183 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4184 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4185 */
4186 void *pvCurTSS32;
4187 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4188 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4189 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4190 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4191 if (rcStrict != VINF_SUCCESS)
4192 {
4193 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4194 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4195 return rcStrict;
4196 }
4197
4198 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4199 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4200 pCurTSS32->eip = uNextEip;
4201 pCurTSS32->eflags = u32EFlags;
4202 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4203 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4204 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4205 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4206 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4207 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4208 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4209 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4210 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4211 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4212 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4213 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4214 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4215 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4216
4217 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4218 if (rcStrict != VINF_SUCCESS)
4219 {
4220 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4221 VBOXSTRICTRC_VAL(rcStrict)));
4222 return rcStrict;
4223 }
4224 }
4225 else
4226 {
4227 /*
4228 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4229 */
4230 void *pvCurTSS16;
4231 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4232 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4233 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4234 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4235 if (rcStrict != VINF_SUCCESS)
4236 {
4237 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4238 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4239 return rcStrict;
4240 }
4241
4242 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4243 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4244 pCurTSS16->ip = uNextEip;
4245 pCurTSS16->flags = u32EFlags;
4246 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4247 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4248 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4249 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4250 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4251 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4252 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4253 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4254 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4255 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4256 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4257 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4258
4259 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4260 if (rcStrict != VINF_SUCCESS)
4261 {
4262 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4263 VBOXSTRICTRC_VAL(rcStrict)));
4264 return rcStrict;
4265 }
4266 }
4267
4268 /*
4269 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4270 */
4271 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4272 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4273 {
4274 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4275 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4276 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4277 }
4278
4279 /*
4280 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4281 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4282 */
4283 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4284 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4285 bool fNewDebugTrap;
4286 if (fIsNewTSS386)
4287 {
4288 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4289 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4290 uNewEip = pNewTSS32->eip;
4291 uNewEflags = pNewTSS32->eflags;
4292 uNewEax = pNewTSS32->eax;
4293 uNewEcx = pNewTSS32->ecx;
4294 uNewEdx = pNewTSS32->edx;
4295 uNewEbx = pNewTSS32->ebx;
4296 uNewEsp = pNewTSS32->esp;
4297 uNewEbp = pNewTSS32->ebp;
4298 uNewEsi = pNewTSS32->esi;
4299 uNewEdi = pNewTSS32->edi;
4300 uNewES = pNewTSS32->es;
4301 uNewCS = pNewTSS32->cs;
4302 uNewSS = pNewTSS32->ss;
4303 uNewDS = pNewTSS32->ds;
4304 uNewFS = pNewTSS32->fs;
4305 uNewGS = pNewTSS32->gs;
4306 uNewLdt = pNewTSS32->selLdt;
4307 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4308 }
4309 else
4310 {
4311 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4312 uNewCr3 = 0;
4313 uNewEip = pNewTSS16->ip;
4314 uNewEflags = pNewTSS16->flags;
4315 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4316 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4317 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4318 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4319 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4320 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4321 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4322 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4323 uNewES = pNewTSS16->es;
4324 uNewCS = pNewTSS16->cs;
4325 uNewSS = pNewTSS16->ss;
4326 uNewDS = pNewTSS16->ds;
4327 uNewFS = 0;
4328 uNewGS = 0;
4329 uNewLdt = pNewTSS16->selLdt;
4330 fNewDebugTrap = false;
4331 }
4332
4333 if (GCPtrNewTSS == GCPtrCurTSS)
4334 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4335 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4336
4337 /*
4338 * We're done accessing the new TSS.
4339 */
4340 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4341 if (rcStrict != VINF_SUCCESS)
4342 {
4343 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4344 return rcStrict;
4345 }
4346
4347 /*
4348 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4349 */
4350 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4351 {
4352 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4353 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4354 if (rcStrict != VINF_SUCCESS)
4355 {
4356 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4357 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4358 return rcStrict;
4359 }
4360
4361 /* Check that the descriptor indicates the new TSS is available (not busy). */
4362 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4363 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4364 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4365
4366 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4367 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4368 if (rcStrict != VINF_SUCCESS)
4369 {
4370 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4371 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4372 return rcStrict;
4373 }
4374 }
4375
4376 /*
4377 * From this point on, we're technically in the new task. We will defer exceptions
4378 * until the completion of the task switch but before executing any instructions in the new task.
4379 */
4380 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4381 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4382 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4383 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4384 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4385 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4386 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4387
4388 /* Set the busy bit in TR. */
4389 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4390 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4391 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4392 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4393 {
4394 uNewEflags |= X86_EFL_NT;
4395 }
4396
4397 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4398 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4399 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4400
4401 pVCpu->cpum.GstCtx.eip = uNewEip;
4402 pVCpu->cpum.GstCtx.eax = uNewEax;
4403 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4404 pVCpu->cpum.GstCtx.edx = uNewEdx;
4405 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4406 pVCpu->cpum.GstCtx.esp = uNewEsp;
4407 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4408 pVCpu->cpum.GstCtx.esi = uNewEsi;
4409 pVCpu->cpum.GstCtx.edi = uNewEdi;
4410
4411 uNewEflags &= X86_EFL_LIVE_MASK;
4412 uNewEflags |= X86_EFL_RA1_MASK;
4413 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4414
4415 /*
4416 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4417 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4418 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4419 */
4420 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4421 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4424 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4427 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4430 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4433 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4436 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4437 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4438
4439 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4440 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4441 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4442 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4443
4444 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4445 {
4446 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4447 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4448 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4449 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4453 }
4454
4455 /*
4456 * Switch CR3 for the new task.
4457 */
4458 if ( fIsNewTSS386
4459 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4460 {
4461 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4462 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4463 AssertRCSuccessReturn(rc, rc);
4464
4465 /* Inform PGM. */
4466 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4467 AssertRCReturn(rc, rc);
4468 /* ignore informational status codes */
4469
4470 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4471 }
4472
4473 /*
4474 * Switch LDTR for the new task.
4475 */
4476 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4477 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4478 else
4479 {
4480 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4481
4482 IEMSELDESC DescNewLdt;
4483 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4484 if (rcStrict != VINF_SUCCESS)
4485 {
4486 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4487 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4488 return rcStrict;
4489 }
4490 if ( !DescNewLdt.Legacy.Gen.u1Present
4491 || DescNewLdt.Legacy.Gen.u1DescType
4492 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4493 {
4494 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4495 uNewLdt, DescNewLdt.Legacy.u));
4496 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4500 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4501 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4502 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4503 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4504 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4505 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4507 }
4508
4509 IEMSELDESC DescSS;
4510 if (IEM_IS_V86_MODE(pVCpu))
4511 {
4512 pVCpu->iem.s.uCpl = 3;
4513 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4514 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4515 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4519
4520 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4521 DescSS.Legacy.u = 0;
4522 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4523 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4524 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4525 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4526 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4527 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4528 DescSS.Legacy.Gen.u2Dpl = 3;
4529 }
4530 else
4531 {
4532 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4533
4534 /*
4535 * Load the stack segment for the new task.
4536 */
4537 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4538 {
4539 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4541 }
4542
4543 /* Fetch the descriptor. */
4544 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4545 if (rcStrict != VINF_SUCCESS)
4546 {
4547 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4548 VBOXSTRICTRC_VAL(rcStrict)));
4549 return rcStrict;
4550 }
4551
4552 /* SS must be a data segment and writable. */
4553 if ( !DescSS.Legacy.Gen.u1DescType
4554 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4555 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4556 {
4557 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4558 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4559 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4563 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4564 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4565 {
4566 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4567 uNewCpl));
4568 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4569 }
4570
4571 /* Is it there? */
4572 if (!DescSS.Legacy.Gen.u1Present)
4573 {
4574 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4575 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4576 }
4577
4578 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4579 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4580
4581 /* Set the accessed bit before committing the result into SS. */
4582 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4583 {
4584 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4585 if (rcStrict != VINF_SUCCESS)
4586 return rcStrict;
4587 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4588 }
4589
4590 /* Commit SS. */
4591 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4592 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4593 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4594 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4595 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4596 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4597 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4598
4599 /* CPL has changed, update IEM before loading rest of segments. */
4600 pVCpu->iem.s.uCpl = uNewCpl;
4601
4602 /*
4603 * Load the data segments for the new task.
4604 */
4605 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617
4618 /*
4619 * Load the code segment for the new task.
4620 */
4621 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4622 {
4623 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4624 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4625 }
4626
4627 /* Fetch the descriptor. */
4628 IEMSELDESC DescCS;
4629 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4630 if (rcStrict != VINF_SUCCESS)
4631 {
4632 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4633 return rcStrict;
4634 }
4635
4636 /* CS must be a code segment. */
4637 if ( !DescCS.Legacy.Gen.u1DescType
4638 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4639 {
4640 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4641 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4642 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4643 }
4644
4645 /* For conforming CS, DPL must be less than or equal to the RPL. */
4646 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4647 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4648 {
4649            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4650 DescCS.Legacy.Gen.u2Dpl));
4651 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4652 }
4653
4654 /* For non-conforming CS, DPL must match RPL. */
4655 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4656 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4657 {
4658            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4659 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4660 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4661 }
4662
4663 /* Is it there? */
4664 if (!DescCS.Legacy.Gen.u1Present)
4665 {
4666 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4667 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4668 }
4669
4670 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4671 u64Base = X86DESC_BASE(&DescCS.Legacy);
4672
4673 /* Set the accessed bit before committing the result into CS. */
4674 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4675 {
4676 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4677 if (rcStrict != VINF_SUCCESS)
4678 return rcStrict;
4679 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4680 }
4681
4682 /* Commit CS. */
4683 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4684 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4685 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4686 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4687 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4688 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4689 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4690 }
4691
4692 /** @todo Debug trap. */
4693 if (fIsNewTSS386 && fNewDebugTrap)
4694 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4695
4696 /*
4697 * Construct the error code masks based on what caused this task switch.
4698 * See Intel Instruction reference for INT.
4699 */
4700 uint16_t uExt;
4701 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4702 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4703 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4704 {
4705 uExt = 1;
4706 }
4707 else
4708 uExt = 0;
4709
4710 /*
4711 * Push any error code on to the new stack.
4712 */
4713 if (fFlags & IEM_XCPT_FLAGS_ERR)
4714 {
4715 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4716 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4717 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
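            /* Only the error code is pushed here: 4 bytes on a 32-bit TSS, 2 bytes on a 16-bit one. */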
4718
4719 /* Check that there is sufficient space on the stack. */
4720 /** @todo Factor out segment limit checking for normal/expand down segments
4721 * into a separate function. */
4722 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4723 {
4724 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4725 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4726 {
4727 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4728 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4729 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4730 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4731 }
4732 }
4733 else
4734 {
4735 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4736 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4737 {
4738 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4739 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4740 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4741 }
4742 }
4743
4744
4745 if (fIsNewTSS386)
4746 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4747 else
4748 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4749 if (rcStrict != VINF_SUCCESS)
4750 {
4751 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4752 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4753 return rcStrict;
4754 }
4755 }
4756
4757 /* Check the new EIP against the new CS limit. */
4758 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4759 {
4760        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4761 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4762 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4763 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4764 }
4765
4766 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4767 pVCpu->cpum.GstCtx.ss.Sel));
4768 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4769}
4770
4771
4772/**
4773 * Implements exceptions and interrupts for protected mode.
4774 *
4775 * @returns VBox strict status code.
4776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4777 * @param cbInstr The number of bytes to offset rIP by in the return
4778 * address.
4779 * @param u8Vector The interrupt / exception vector number.
4780 * @param fFlags The flags.
4781 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4782 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4783 */
4784IEM_STATIC VBOXSTRICTRC
4785iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4786 uint8_t cbInstr,
4787 uint8_t u8Vector,
4788 uint32_t fFlags,
4789 uint16_t uErr,
4790 uint64_t uCr2)
4791{
4792 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4793
4794 /*
4795 * Read the IDT entry.
4796 */
4797 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4798 {
4799 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4800 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4801 }
4802 X86DESC Idte;
4803 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4804 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4805 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4806 {
4807 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4808 return rcStrict;
4809 }
4810 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4811 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4812 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4813
4814 /*
4815 * Check the descriptor type, DPL and such.
4816 * ASSUMES this is done in the same order as described for call-gate calls.
4817 */
4818 if (Idte.Gate.u1DescType)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4822 }
4823 bool fTaskGate = false;
4824 uint8_t f32BitGate = true;
4825 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4826 switch (Idte.Gate.u4Type)
4827 {
4828 case X86_SEL_TYPE_SYS_UNDEFINED:
4829 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4830 case X86_SEL_TYPE_SYS_LDT:
4831 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4832 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4833 case X86_SEL_TYPE_SYS_UNDEFINED2:
4834 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4835 case X86_SEL_TYPE_SYS_UNDEFINED3:
4836 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4837 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4838 case X86_SEL_TYPE_SYS_UNDEFINED4:
4839 {
4840 /** @todo check what actually happens when the type is wrong...
4841 * esp. call gates. */
4842 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4843 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4844 }
4845
4846 case X86_SEL_TYPE_SYS_286_INT_GATE:
4847 f32BitGate = false;
4848 RT_FALL_THRU();
4849 case X86_SEL_TYPE_SYS_386_INT_GATE:
4850 fEflToClear |= X86_EFL_IF;
4851 break;
4852
4853 case X86_SEL_TYPE_SYS_TASK_GATE:
4854 fTaskGate = true;
4855#ifndef IEM_IMPLEMENTS_TASKSWITCH
4856 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4857#endif
4858 break;
4859
4860 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4861            f32BitGate = false;
                RT_FALL_THRU();
4862 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4863 break;
4864
4865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4866 }
4867
4868 /* Check DPL against CPL if applicable. */
4869 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4870 {
4871 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4874 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4875 }
4876 }
4877
4878 /* Is it there? */
4879 if (!Idte.Gate.u1Present)
4880 {
4881 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4882 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4883 }
4884
4885 /* Is it a task-gate? */
4886 if (fTaskGate)
4887 {
4888 /*
4889 * Construct the error code masks based on what caused this task switch.
4890 * See Intel Instruction reference for INT.
4891 */
4892 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4893 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4894 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4895 RTSEL SelTSS = Idte.Gate.u16Sel;
4896
4897 /*
4898 * Fetch the TSS descriptor in the GDT.
4899 */
4900 IEMSELDESC DescTSS;
4901 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4902 if (rcStrict != VINF_SUCCESS)
4903 {
4904 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4905 VBOXSTRICTRC_VAL(rcStrict)));
4906 return rcStrict;
4907 }
4908
4909 /* The TSS descriptor must be a system segment and be available (not busy). */
4910 if ( DescTSS.Legacy.Gen.u1DescType
4911 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4912 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4913 {
4914 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4915 u8Vector, SelTSS, DescTSS.Legacy.au64));
4916 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4917 }
4918
4919 /* The TSS must be present. */
4920 if (!DescTSS.Legacy.Gen.u1Present)
4921 {
4922 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4923 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4924 }
4925
4926 /* Do the actual task switch. */
4927 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4928 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4929 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4930 }
4931
4932 /* A null CS is bad. */
4933 RTSEL NewCS = Idte.Gate.u16Sel;
4934 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4935 {
4936 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4937 return iemRaiseGeneralProtectionFault0(pVCpu);
4938 }
4939
4940 /* Fetch the descriptor for the new CS. */
4941 IEMSELDESC DescCS;
4942 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4943 if (rcStrict != VINF_SUCCESS)
4944 {
4945 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4946 return rcStrict;
4947 }
4948
4949 /* Must be a code segment. */
4950 if (!DescCS.Legacy.Gen.u1DescType)
4951 {
4952 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4953 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4954 }
4955 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4956 {
4957 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4958 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4959 }
4960
4961 /* Don't allow lowering the privilege level. */
4962 /** @todo Does the lowering of privileges apply to software interrupts
4963 * only? This has bearings on the more-privileged or
4964 * same-privilege stack behavior further down. A testcase would
4965 * be nice. */
4966 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4967 {
4968 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4969 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4970 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4971 }
4972
4973 /* Make sure the selector is present. */
4974 if (!DescCS.Legacy.Gen.u1Present)
4975 {
4976 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4977 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4978 }
4979
4980 /* Check the new EIP against the new CS limit. */
4981 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4982 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4983 ? Idte.Gate.u16OffsetLow
4984 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
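        /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words into a 32-bit EIP. */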
4985 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4986 if (uNewEip > cbLimitCS)
4987 {
4988 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4989 u8Vector, uNewEip, cbLimitCS, NewCS));
4990 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4991 }
4992 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4993
4994 /* Calc the flag image to push. */
4995 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4996 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4997 fEfl &= ~X86_EFL_RF;
4998 else
4999 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5000
5001 /* From V8086 mode only go to CPL 0. */
5002 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5003 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
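        /* A conforming handler CS keeps the current CPL; a non-conforming one runs at the CS descriptor's DPL. */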
5004 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5005 {
5006 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5007 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5008 }
5009
5010 /*
5011 * If the privilege level changes, we need to get a new stack from the TSS.
5012 * This in turns means validating the new SS and ESP...
5013 */
5014 if (uNewCpl != pVCpu->iem.s.uCpl)
5015 {
5016 RTSEL NewSS;
5017 uint32_t uNewEsp;
5018 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021
5022 IEMSELDESC DescSS;
5023 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5024 if (rcStrict != VINF_SUCCESS)
5025 return rcStrict;
5026 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5027 if (!DescSS.Legacy.Gen.u1DefBig)
5028 {
5029 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5030 uNewEsp = (uint16_t)uNewEsp;
5031 }
5032
5033 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5034
5035 /* Check that there is sufficient space for the stack frame. */
5036 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5037 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5038 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5039 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
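            /* Frame entries: EIP, CS, EFLAGS, ESP and SS (5), or 9 with the V8086 ES/DS/FS/GS, plus the optional
               error code; each entry is 2 bytes for a 16-bit gate, doubled by f32BitGate for a 32-bit gate. */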
5040
5041 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5042 {
5043 if ( uNewEsp - 1 > cbLimitSS
5044 || uNewEsp < cbStackFrame)
5045 {
5046 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5047 u8Vector, NewSS, uNewEsp, cbStackFrame));
5048 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5049 }
5050 }
5051 else
5052 {
5053 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5054 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5055 {
5056 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5057 u8Vector, NewSS, uNewEsp, cbStackFrame));
5058 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5059 }
5060 }
5061
5062 /*
5063 * Start making changes.
5064 */
5065
5066 /* Set the new CPL so that stack accesses use it. */
5067 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5068 pVCpu->iem.s.uCpl = uNewCpl;
5069
5070 /* Create the stack frame. */
5071 RTPTRUNION uStackFrame;
5072 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5073 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5074 if (rcStrict != VINF_SUCCESS)
5075 return rcStrict;
5076 void * const pvStackFrame = uStackFrame.pv;
5077 if (f32BitGate)
5078 {
5079 if (fFlags & IEM_XCPT_FLAGS_ERR)
5080 *uStackFrame.pu32++ = uErr;
5081 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5082 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5083 uStackFrame.pu32[2] = fEfl;
5084 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5085 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5086 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5087 if (fEfl & X86_EFL_VM)
5088 {
5089 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5090 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5091 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5092 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5093 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5094 }
5095 }
5096 else
5097 {
5098 if (fFlags & IEM_XCPT_FLAGS_ERR)
5099 *uStackFrame.pu16++ = uErr;
5100 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5101 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5102 uStackFrame.pu16[2] = fEfl;
5103 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5104 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5105 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5106 if (fEfl & X86_EFL_VM)
5107 {
5108 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5109 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5110 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5111 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5112 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5113 }
5114 }
5115 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5116 if (rcStrict != VINF_SUCCESS)
5117 return rcStrict;
5118
5119 /* Mark the selectors 'accessed' (hope this is the correct time). */
5120        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5121 * after pushing the stack frame? (Write protect the gdt + stack to
5122 * find out.) */
5123 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5124 {
5125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5126 if (rcStrict != VINF_SUCCESS)
5127 return rcStrict;
5128 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5129 }
5130
5131 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5132 {
5133 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5134 if (rcStrict != VINF_SUCCESS)
5135 return rcStrict;
5136 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5137 }
5138
5139 /*
5140         * Start committing the register changes (joins with the DPL=CPL branch).
5141 */
5142 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5143 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5144 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5145 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5146 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5147 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5148 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5149 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5150 * SP is loaded).
5151 * Need to check the other combinations too:
5152 * - 16-bit TSS, 32-bit handler
5153 * - 32-bit TSS, 16-bit handler */
5154 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5155 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5156 else
5157 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5158
5159 if (fEfl & X86_EFL_VM)
5160 {
5161 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5163 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5164 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5165 }
5166 }
5167 /*
5168 * Same privilege, no stack change and smaller stack frame.
5169 */
5170 else
5171 {
5172 uint64_t uNewRsp;
5173 RTPTRUNION uStackFrame;
5174 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
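            /* No stack switch here: just EIP, CS and EFLAGS (3 entries) plus the optional error code;
               entries are 2 bytes wide for a 16-bit gate and 4 bytes for a 32-bit gate. */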
5175 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5176 if (rcStrict != VINF_SUCCESS)
5177 return rcStrict;
5178 void * const pvStackFrame = uStackFrame.pv;
5179
5180 if (f32BitGate)
5181 {
5182 if (fFlags & IEM_XCPT_FLAGS_ERR)
5183 *uStackFrame.pu32++ = uErr;
5184 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5185 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5186 uStackFrame.pu32[2] = fEfl;
5187 }
5188 else
5189 {
5190 if (fFlags & IEM_XCPT_FLAGS_ERR)
5191 *uStackFrame.pu16++ = uErr;
5192 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5193 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5194 uStackFrame.pu16[2] = fEfl;
5195 }
5196 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5197 if (rcStrict != VINF_SUCCESS)
5198 return rcStrict;
5199
5200 /* Mark the CS selector as 'accessed'. */
5201 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5202 {
5203 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5204 if (rcStrict != VINF_SUCCESS)
5205 return rcStrict;
5206 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5207 }
5208
5209 /*
5210 * Start committing the register changes (joins with the other branch).
5211 */
5212 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5213 }
5214
5215 /* ... register committing continues. */
5216 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5217 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5218 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5219 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5220 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5221 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5222
5223 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5224 fEfl &= ~fEflToClear;
5225 IEMMISC_SET_EFL(pVCpu, fEfl);
5226
5227 if (fFlags & IEM_XCPT_FLAGS_CR2)
5228 pVCpu->cpum.GstCtx.cr2 = uCr2;
5229
5230 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5231 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5232
5233 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5234}
5235
5236
5237/**
5238 * Implements exceptions and interrupts for long mode.
5239 *
5240 * @returns VBox strict status code.
5241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5242 * @param cbInstr The number of bytes to offset rIP by in the return
5243 * address.
5244 * @param u8Vector The interrupt / exception vector number.
5245 * @param fFlags The flags.
5246 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5247 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5248 */
5249IEM_STATIC VBOXSTRICTRC
5250iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5251 uint8_t cbInstr,
5252 uint8_t u8Vector,
5253 uint32_t fFlags,
5254 uint16_t uErr,
5255 uint64_t uCr2)
5256{
5257 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5258
5259 /*
5260 * Read the IDT entry.
5261 */
5262 uint16_t offIdt = (uint16_t)u8Vector << 4;
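        /* Long-mode IDT entries are 16 bytes each, hence the vector is scaled by 16. */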
5263 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5264 {
5265 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5266 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5267 }
5268 X86DESC64 Idte;
5269 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5270 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5271 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5272 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5273 {
5274 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5275 return rcStrict;
5276 }
5277 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5278 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5279 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5280
5281 /*
5282 * Check the descriptor type, DPL and such.
5283 * ASSUMES this is done in the same order as described for call-gate calls.
5284 */
5285 if (Idte.Gate.u1DescType)
5286 {
5287 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5288 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5289 }
5290 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5291 switch (Idte.Gate.u4Type)
5292 {
5293 case AMD64_SEL_TYPE_SYS_INT_GATE:
5294 fEflToClear |= X86_EFL_IF;
5295 break;
5296 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5297 break;
5298
5299 default:
5300 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5301 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5302 }
5303
5304 /* Check DPL against CPL if applicable. */
5305 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5306 {
5307 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5308 {
5309 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5310 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5311 }
5312 }
5313
5314 /* Is it there? */
5315 if (!Idte.Gate.u1Present)
5316 {
5317 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5318 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5319 }
5320
5321 /* A null CS is bad. */
5322 RTSEL NewCS = Idte.Gate.u16Sel;
5323 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5324 {
5325 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5326 return iemRaiseGeneralProtectionFault0(pVCpu);
5327 }
5328
5329 /* Fetch the descriptor for the new CS. */
5330 IEMSELDESC DescCS;
5331 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5332 if (rcStrict != VINF_SUCCESS)
5333 {
5334 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5335 return rcStrict;
5336 }
5337
5338 /* Must be a 64-bit code segment. */
5339 if (!DescCS.Long.Gen.u1DescType)
5340 {
5341 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5342 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5343 }
5344 if ( !DescCS.Long.Gen.u1Long
5345 || DescCS.Long.Gen.u1DefBig
5346 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5347 {
5348 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5349 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5350 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5351 }
5352
5353 /* Don't allow lowering the privilege level. For non-conforming CS
5354 selectors, the CS.DPL sets the privilege level the trap/interrupt
5355 handler runs at. For conforming CS selectors, the CPL remains
5356 unchanged, but the CS.DPL must be <= CPL. */
5357 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5358 * when CPU in Ring-0. Result \#GP? */
5359 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5360 {
5361 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5362 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5363 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5364 }
5365
5366
5367 /* Make sure the selector is present. */
5368 if (!DescCS.Legacy.Gen.u1Present)
5369 {
5370 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5371 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5372 }
5373
5374 /* Check that the new RIP is canonical. */
5375 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5376 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5377 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5378 if (!IEM_IS_CANONICAL(uNewRip))
5379 {
5380 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5381 return iemRaiseGeneralProtectionFault0(pVCpu);
5382 }
5383
5384 /*
5385 * If the privilege level changes or if the IST isn't zero, we need to get
5386 * a new stack from the TSS.
5387 */
5388 uint64_t uNewRsp;
5389 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5390 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5391 if ( uNewCpl != pVCpu->iem.s.uCpl
5392 || Idte.Gate.u3IST != 0)
5393 {
5394 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5395 if (rcStrict != VINF_SUCCESS)
5396 return rcStrict;
5397 }
5398 else
5399 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5400 uNewRsp &= ~(uint64_t)0xf;
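        /* In 64-bit mode the CPU aligns the stack on a 16-byte boundary before pushing the frame,
           whether or not a stack switch took place. */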
5401
5402 /*
5403 * Calc the flag image to push.
5404 */
5405 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5406 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5407 fEfl &= ~X86_EFL_RF;
5408 else
5409 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5410
5411 /*
5412 * Start making changes.
5413 */
5414 /* Set the new CPL so that stack accesses use it. */
5415 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5416 pVCpu->iem.s.uCpl = uNewCpl;
5417
5418 /* Create the stack frame. */
5419 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
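        /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus a sixth for the error code when present. */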
5420 RTPTRUNION uStackFrame;
5421 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5422 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5423 if (rcStrict != VINF_SUCCESS)
5424 return rcStrict;
5425 void * const pvStackFrame = uStackFrame.pv;
5426
5427 if (fFlags & IEM_XCPT_FLAGS_ERR)
5428 *uStackFrame.pu64++ = uErr;
5429 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5430 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5431 uStackFrame.pu64[2] = fEfl;
5432 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5433 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5434 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5435 if (rcStrict != VINF_SUCCESS)
5436 return rcStrict;
5437
5438    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5439    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5440 * after pushing the stack frame? (Write protect the gdt + stack to
5441 * find out.) */
5442 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5443 {
5444 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5445 if (rcStrict != VINF_SUCCESS)
5446 return rcStrict;
5447 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5448 }
5449
5450 /*
5451     * Start committing the register changes.
5452 */
5453 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5454 * hidden registers when interrupting 32-bit or 16-bit code! */
5455 if (uNewCpl != uOldCpl)
5456 {
5457 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5458 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5459 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5460 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5461 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5462 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5463 }
5464 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5465 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5466 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5467 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5468 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5469 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5470 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5471 pVCpu->cpum.GstCtx.rip = uNewRip;
5472
5473 fEfl &= ~fEflToClear;
5474 IEMMISC_SET_EFL(pVCpu, fEfl);
5475
5476 if (fFlags & IEM_XCPT_FLAGS_CR2)
5477 pVCpu->cpum.GstCtx.cr2 = uCr2;
5478
5479 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5480 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5481
5482 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5483}
5484
5485
5486/**
5487 * Implements exceptions and interrupts.
5488 *
5489 * All exceptions and interrupts go through this function!
5490 *
5491 * @returns VBox strict status code.
5492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5493 * @param cbInstr The number of bytes to offset rIP by in the return
5494 * address.
5495 * @param u8Vector The interrupt / exception vector number.
5496 * @param fFlags The flags.
5497 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5498 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5499 */
5500DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5501iemRaiseXcptOrInt(PVMCPU pVCpu,
5502 uint8_t cbInstr,
5503 uint8_t u8Vector,
5504 uint32_t fFlags,
5505 uint16_t uErr,
5506 uint64_t uCr2)
5507{
5508 /*
5509 * Get all the state that we might need here.
5510 */
5511 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5512 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5513
5514#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5515 /*
5516 * Flush prefetch buffer
5517 */
5518 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5519#endif
5520
5521 /*
5522 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5523 */
5524 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5525 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5526 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5527 | IEM_XCPT_FLAGS_BP_INSTR
5528 | IEM_XCPT_FLAGS_ICEBP_INSTR
5529 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5530 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5531 {
5532 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5533 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5534 u8Vector = X86_XCPT_GP;
5535 uErr = 0;
5536 }
5537#ifdef DBGFTRACE_ENABLED
5538 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5539 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5540 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5541#endif
5542
5543 /*
5544 * Evaluate whether NMI blocking should be in effect.
5545 * Normally, NMI blocking is in effect whenever we inject an NMI.
5546 */
5547 bool fBlockNmi;
5548 if ( u8Vector == X86_XCPT_NMI
5549 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5550 fBlockNmi = true;
5551 else
5552 fBlockNmi = false;
5553
5554#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5555 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5556 {
5557 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5558 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5559 return rcStrict0;
5560
5561 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5562 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5563 {
5564 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5565 fBlockNmi = false;
5566 }
5567 }
5568#endif
5569
5570#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5571 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5572 {
5573 /*
5574 * If the event is being injected as part of VMRUN, it isn't subject to event
5575 * intercepts in the nested-guest. However, secondary exceptions that occur
5576 * during injection of any event -are- subject to exception intercepts.
5577 *
5578 * See AMD spec. 15.20 "Event Injection".
5579 */
5580 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5581 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5582 else
5583 {
5584 /*
5585 * Check and handle if the event being raised is intercepted.
5586 */
5587 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5588 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5589 return rcStrict0;
5590 }
5591 }
5592#endif
5593
5594 /*
5595 * Set NMI blocking if necessary.
5596 */
5597 if ( fBlockNmi
5598 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5599 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5600
5601 /*
5602 * Do recursion accounting.
5603 */
5604 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5605 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5606 if (pVCpu->iem.s.cXcptRecursions == 0)
5607 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5608 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5609 else
5610 {
5611 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5612 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5613 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5614
5615 if (pVCpu->iem.s.cXcptRecursions >= 4)
5616 {
5617#ifdef DEBUG_bird
5618 AssertFailed();
5619#endif
5620 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5621 }
5622
5623 /*
5624 * Evaluate the sequence of recurring events.
5625 */
5626 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5627 NULL /* pXcptRaiseInfo */);
5628 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5629 { /* likely */ }
5630 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5631 {
5632 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5633 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5634 u8Vector = X86_XCPT_DF;
5635 uErr = 0;
5636#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5637 /* VMX nested-guest #DF intercept needs to be checked here. */
5638 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5639 {
5640 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5641 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5642 return rcStrict0;
5643 }
5644#endif
5645 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5646 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5647 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5648 }
5649 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5650 {
5651 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5652 return iemInitiateCpuShutdown(pVCpu);
5653 }
5654 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5655 {
5656 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5657 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5658 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5659 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5660 return VERR_EM_GUEST_CPU_HANG;
5661 }
5662 else
5663 {
5664 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5665 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5666 return VERR_IEM_IPE_9;
5667 }
5668
5669 /*
5670         * The 'EXT' bit is set when an exception occurs during delivery of an external
5671         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5672         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5673         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5674 *
5675 * [1] - Intel spec. 6.13 "Error Code"
5676 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5677 * [3] - Intel Instruction reference for INT n.
5678 */
5679 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5680 && (fFlags & IEM_XCPT_FLAGS_ERR)
5681 && u8Vector != X86_XCPT_PF
5682 && u8Vector != X86_XCPT_DF)
5683 {
5684 uErr |= X86_TRAP_ERR_EXTERNAL;
5685 }
5686 }
5687
5688 pVCpu->iem.s.cXcptRecursions++;
5689 pVCpu->iem.s.uCurXcpt = u8Vector;
5690 pVCpu->iem.s.fCurXcpt = fFlags;
5691 pVCpu->iem.s.uCurXcptErr = uErr;
5692 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5693
5694 /*
5695 * Extensive logging.
5696 */
5697#if defined(LOG_ENABLED) && defined(IN_RING3)
5698 if (LogIs3Enabled())
5699 {
5700 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5701 PVM pVM = pVCpu->CTX_SUFF(pVM);
5702 char szRegs[4096];
5703 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5704 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5705 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5706 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5707 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5708 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5709 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5710 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5711 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5712 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5713 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5714 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5715 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5716 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5717 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5718 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5719 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5720 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5721 " efer=%016VR{efer}\n"
5722 " pat=%016VR{pat}\n"
5723 " sf_mask=%016VR{sf_mask}\n"
5724 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5725 " lstar=%016VR{lstar}\n"
5726 " star=%016VR{star} cstar=%016VR{cstar}\n"
5727 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5728 );
5729
5730 char szInstr[256];
5731 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5732 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5733 szInstr, sizeof(szInstr), NULL);
5734 Log3(("%s%s\n", szRegs, szInstr));
5735 }
5736#endif /* LOG_ENABLED */
5737
5738 /*
5739 * Call the mode specific worker function.
5740 */
5741 VBOXSTRICTRC rcStrict;
5742 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5743 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5744 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5745 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5746 else
5747 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5748
5749 /* Flush the prefetch buffer. */
5750#ifdef IEM_WITH_CODE_TLB
5751 pVCpu->iem.s.pbInstrBuf = NULL;
5752#else
5753 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5754#endif
5755
5756 /*
5757 * Unwind.
5758 */
5759 pVCpu->iem.s.cXcptRecursions--;
5760 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5761 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5762 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5763 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5764 pVCpu->iem.s.cXcptRecursions + 1));
5765 return rcStrict;
5766}
5767
5768#ifdef IEM_WITH_SETJMP
5769/**
5770 * See iemRaiseXcptOrInt. Will not return.
5771 */
5772IEM_STATIC DECL_NO_RETURN(void)
5773iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5774 uint8_t cbInstr,
5775 uint8_t u8Vector,
5776 uint32_t fFlags,
5777 uint16_t uErr,
5778 uint64_t uCr2)
5779{
5780 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5781 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5782}
5783#endif
5784
5785
5786/** \#DE - 00. */
5787DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5788{
5789 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5790}
5791
5792
5793/** \#DB - 01.
5794 * @note This automatically clears DR7.GD. */
5795DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5796{
5797 /** @todo set/clear RF. */
5798 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5799 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5800}
5801
5802
5803/** \#BR - 05. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5807}
5808
5809
5810/** \#UD - 06. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5812{
5813 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5814}
5815
5816
5817/** \#NM - 07. */
5818DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5819{
5820 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5821}
5822
5823
5824/** \#TS(err) - 0a. */
5825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5826{
5827 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5828}
5829
5830
5831/** \#TS(tr) - 0a. */
5832DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5833{
5834 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5835 pVCpu->cpum.GstCtx.tr.Sel, 0);
5836}
5837
5838
5839/** \#TS(0) - 0a. */
5840DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5841{
5842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5843 0, 0);
5844}
5845
5846
5847/** \#TS(sel) - 0a. */
5848DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5849{
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5851 uSel & X86_SEL_MASK_OFF_RPL, 0);
5852}
5853
5854
5855/** \#NP(err) - 0b. */
5856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5857{
5858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5859}
5860
5861
5862/** \#NP(sel) - 0b. */
5863DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5864{
5865 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5866 uSel & ~X86_SEL_RPL, 0);
5867}
5868
5869
5870/** \#SS(seg) - 0c. */
5871DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5872{
5873 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5874 uSel & ~X86_SEL_RPL, 0);
5875}
5876
5877
5878/** \#SS(err) - 0c. */
5879DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5880{
5881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5882}
5883
5884
5885/** \#GP(n) - 0d. */
5886DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5887{
5888 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5889}
5890
5891
5892/** \#GP(0) - 0d. */
5893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5894{
5895 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5896}
5897
5898#ifdef IEM_WITH_SETJMP
5899/** \#GP(0) - 0d. */
5900DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5901{
5902 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5903}
5904#endif
5905
5906
5907/** \#GP(sel) - 0d. */
5908DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5909{
5910 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5911 Sel & ~X86_SEL_RPL, 0);
5912}
5913
5914
5915/** \#GP(0) - 0d. */
5916DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5917{
5918 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5919}
5920
5921
5922/** \#GP(sel) - 0d. */
5923DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5924{
5925 NOREF(iSegReg); NOREF(fAccess);
5926 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5927 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5928}
5929
5930#ifdef IEM_WITH_SETJMP
5931/** \#GP(sel) - 0d, longjmp. */
5932DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5933{
5934 NOREF(iSegReg); NOREF(fAccess);
5935 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5936 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5937}
5938#endif
5939
5940/** \#GP(sel) - 0d. */
5941DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5942{
5943 NOREF(Sel);
5944 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5945}
5946
5947#ifdef IEM_WITH_SETJMP
5948/** \#GP(sel) - 0d, longjmp. */
5949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5950{
5951 NOREF(Sel);
5952 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5953}
5954#endif
5955
5956
5957/** \#GP(sel) - 0d. */
5958DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5959{
5960 NOREF(iSegReg); NOREF(fAccess);
5961 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5962}
5963
5964#ifdef IEM_WITH_SETJMP
5965/** \#GP(sel) - 0d, longjmp. */
5966DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5967 uint32_t fAccess)
5968{
5969 NOREF(iSegReg); NOREF(fAccess);
5970 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5971}
5972#endif
5973
5974
5975/** \#PF(n) - 0e. */
5976DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5977{
5978 uint16_t uErr;
5979 switch (rc)
5980 {
5981 case VERR_PAGE_NOT_PRESENT:
5982 case VERR_PAGE_TABLE_NOT_PRESENT:
5983 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5984 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5985 uErr = 0;
5986 break;
5987
5988 default:
5989 AssertMsgFailed(("%Rrc\n", rc));
5990 RT_FALL_THRU();
5991 case VERR_ACCESS_DENIED:
5992 uErr = X86_TRAP_PF_P;
5993 break;
5994
5995 /** @todo reserved */
5996 }
5997
5998 if (pVCpu->iem.s.uCpl == 3)
5999 uErr |= X86_TRAP_PF_US;
6000
6001 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6002 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6003 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6004 uErr |= X86_TRAP_PF_ID;
6005
6006#if 0 /* This is so much non-sense, really. Why was it done like that? */
6007 /* Note! RW access callers reporting a WRITE protection fault, will clear
6008 the READ flag before calling. So, read-modify-write accesses (RW)
6009 can safely be reported as READ faults. */
6010 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6011 uErr |= X86_TRAP_PF_RW;
6012#else
6013 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6014 {
6015 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6016 uErr |= X86_TRAP_PF_RW;
6017 }
6018#endif
6019
6020 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6021 uErr, GCPtrWhere);
6022}
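
#if 0 /* Editor's illustrative sketch, not part of the file: how the #PF error
       * code assembled above comes together for a common case.  A ring-3 write
       * that faults on a present but write-protected page maps
       * VERR_ACCESS_DENIED to X86_TRAP_PF_P and then picks up the US and RW
       * bits, giving an error code of 0x0007. */
static uint16_t iemSketchPfErrCodeRing3Write(void)
{
    uint16_t uErr = X86_TRAP_PF_P;  /* page present => protection violation */
    uErr |= X86_TRAP_PF_US;         /* uCpl == 3 */
    uErr |= X86_TRAP_PF_RW;         /* pure write; read-modify-write is reported as a read */
    return uErr;                    /* 0x0007 */
}
#endif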
6023
6024#ifdef IEM_WITH_SETJMP
6025/** \#PF(n) - 0e, longjmp. */
6026IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6027{
6028 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6029}
6030#endif
6031
6032
6033/** \#MF(0) - 10. */
6034DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6035{
6036 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6037}
6038
6039
6040/** \#AC(0) - 11. */
6041DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6042{
6043 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6044}
6045
6046
6047/**
6048 * Macro for calling iemCImplRaiseDivideError().
6049 *
6050 * This enables us to add/remove arguments and force different levels of
6051 * inlining as we wish.
6052 *
6053 * @return Strict VBox status code.
6054 */
6055#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6056IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6057{
6058 NOREF(cbInstr);
6059 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6060}
6061
6062
6063/**
6064 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6065 *
6066 * This enables us to add/remove arguments and force different levels of
6067 * inlining as we wish.
6068 *
6069 * @return Strict VBox status code.
6070 */
6071#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6072IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6073{
6074 NOREF(cbInstr);
6075 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6076}
6077
6078
6079/**
6080 * Macro for calling iemCImplRaiseInvalidOpcode().
6081 *
6082 * This enables us to add/remove arguments and force different levels of
6083 * inlining as we wish.
6084 *
6085 * @return Strict VBox status code.
6086 */
6087#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6088IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6089{
6090 NOREF(cbInstr);
6091 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6092}
6093
6094
6095/** @} */
6096
6097
6098/*
6099 *
6100 * Helper routines.
6101 * Helper routines.
6102 * Helper routines.
6103 *
6104 */
6105
6106/**
6107 * Recalculates the effective operand size.
6108 *
6109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6110 */
6111IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6112{
6113 switch (pVCpu->iem.s.enmCpuMode)
6114 {
6115 case IEMMODE_16BIT:
6116 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6117 break;
6118 case IEMMODE_32BIT:
6119 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6120 break;
6121 case IEMMODE_64BIT:
6122 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6123 {
6124 case 0:
6125 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6126 break;
6127 case IEM_OP_PRF_SIZE_OP:
6128 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6129 break;
6130 case IEM_OP_PRF_SIZE_REX_W:
6131 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6132 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6133 break;
6134 }
6135 break;
6136 default:
6137 AssertFailed();
6138 }
6139}
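
#if 0 /* Editor's illustrative sketch, not part of the file: the three outcomes
       * the 64-bit case above can produce.  REX.W wins over a 0x66 operand
       * size prefix; with neither prefix the instruction's default size is
       * used (32-bit for most instructions, see iemRecalEffOpSize64Default
       * below for the 64-bit-default ones). */
static IEMMODE iemSketchEffOpSize64(uint32_t fPrefixes, IEMMODE enmDefOpSize)
{
    if (fPrefixes & IEM_OP_PRF_SIZE_REX_W)
        return IEMMODE_64BIT;       /* REX.W=1: 64-bit, 0x66 is ignored */
    if (fPrefixes & IEM_OP_PRF_SIZE_OP)
        return IEMMODE_16BIT;       /* 0x66 only: 16-bit */
    return enmDefOpSize;            /* no prefix: the default operand size */
}
#endif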
6140
6141
6142/**
6143 * Sets the default operand size to 64-bit and recalculates the effective
6144 * operand size.
6145 *
6146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6147 */
6148IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6149{
6150 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6151 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6152 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6153 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6154 else
6155 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6156}
6157
6158
6159/*
6160 *
6161 * Common opcode decoders.
6162 * Common opcode decoders.
6163 * Common opcode decoders.
6164 *
6165 */
6166//#include <iprt/mem.h>
6167
6168/**
6169 * Used to add extra details about a stub case.
6170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6171 */
6172IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6173{
6174#if defined(LOG_ENABLED) && defined(IN_RING3)
6175 PVM pVM = pVCpu->CTX_SUFF(pVM);
6176 char szRegs[4096];
6177 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6178 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6179 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6180 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6181 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6182 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6183 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6184 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6185 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6186 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6187 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6188 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6189 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6190 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6191 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6192 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6193 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6194 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6195 " efer=%016VR{efer}\n"
6196 " pat=%016VR{pat}\n"
6197 " sf_mask=%016VR{sf_mask}\n"
6198 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6199 " lstar=%016VR{lstar}\n"
6200 " star=%016VR{star} cstar=%016VR{cstar}\n"
6201 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6202 );
6203
6204 char szInstr[256];
6205 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6206 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6207 szInstr, sizeof(szInstr), NULL);
6208
6209 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6210#else
6211    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6212#endif
6213}
6214
6215/**
6216 * Complains about a stub.
6217 *
6218 * Two versions of this macro are provided: one for daily use and one for use
6219 * when working on IEM.
6220 */
6221#if 0
6222# define IEMOP_BITCH_ABOUT_STUB() \
6223 do { \
6224 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6225 iemOpStubMsg2(pVCpu); \
6226 RTAssertPanic(); \
6227 } while (0)
6228#else
6229# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6230#endif
6231
6232/** Stubs an opcode. */
6233#define FNIEMOP_STUB(a_Name) \
6234 FNIEMOP_DEF(a_Name) \
6235 { \
6236 RT_NOREF_PV(pVCpu); \
6237 IEMOP_BITCH_ABOUT_STUB(); \
6238 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6239 } \
6240 typedef int ignore_semicolon
6241
6242/** Stubs an opcode. */
6243#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6244 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6245 { \
6246 RT_NOREF_PV(pVCpu); \
6247 RT_NOREF_PV(a_Name0); \
6248 IEMOP_BITCH_ABOUT_STUB(); \
6249 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6250 } \
6251 typedef int ignore_semicolon
6252
6253/** Stubs an opcode which currently should raise \#UD. */
6254#define FNIEMOP_UD_STUB(a_Name) \
6255 FNIEMOP_DEF(a_Name) \
6256 { \
6257 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6258 return IEMOP_RAISE_INVALID_OPCODE(); \
6259 } \
6260 typedef int ignore_semicolon
6261
6262/** Stubs an opcode which currently should raise \#UD. */
6263#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6264 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6265 { \
6266 RT_NOREF_PV(pVCpu); \
6267 RT_NOREF_PV(a_Name0); \
6268 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6269 return IEMOP_RAISE_INVALID_OPCODE(); \
6270 } \
6271 typedef int ignore_semicolon
6272
6273
6274
6275/** @name Register Access.
6276 * @{
6277 */
6278
6279/**
6280 * Gets a reference (pointer) to the specified hidden segment register.
6281 *
6282 * @returns Hidden register reference.
6283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6284 * @param iSegReg The segment register.
6285 */
6286IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6287{
6288 Assert(iSegReg < X86_SREG_COUNT);
6289 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6290 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6291
6292#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6293 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6294 { /* likely */ }
6295 else
6296 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6297#else
6298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6299#endif
6300 return pSReg;
6301}
6302
6303
6304/**
6305 * Ensures that the given hidden segment register is up to date.
6306 *
6307 * @returns Hidden register reference.
6308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6309 * @param pSReg The segment register.
6310 */
6311IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6312{
6313#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6314 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6315 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6316#else
6317 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6318 NOREF(pVCpu);
6319#endif
6320 return pSReg;
6321}
6322
6323
6324/**
6325 * Gets a reference (pointer) to the specified segment register (the selector
6326 * value).
6327 *
6328 * @returns Pointer to the selector variable.
6329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6330 * @param iSegReg The segment register.
6331 */
6332DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6333{
6334 Assert(iSegReg < X86_SREG_COUNT);
6335 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6336 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6337}
6338
6339
6340/**
6341 * Fetches the selector value of a segment register.
6342 *
6343 * @returns The selector value.
6344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6345 * @param iSegReg The segment register.
6346 */
6347DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6348{
6349 Assert(iSegReg < X86_SREG_COUNT);
6350 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6351 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6352}
6353
6354
6355/**
6356 * Fetches the base address value of a segment register.
6357 *
6358 * @returns The segment base address value.
6359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6360 * @param iSegReg The segment register.
6361 */
6362DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6363{
6364 Assert(iSegReg < X86_SREG_COUNT);
6365 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6366 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6367}
6368
6369
6370/**
6371 * Gets a reference (pointer) to the specified general purpose register.
6372 *
6373 * @returns Register reference.
6374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6375 * @param iReg The general purpose register.
6376 */
6377DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6378{
6379 Assert(iReg < 16);
6380 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6381}
6382
6383
6384/**
6385 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6386 *
6387 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6388 *
6389 * @returns Register reference.
6390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6391 * @param iReg The register.
6392 */
6393DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6394{
6395 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6396 {
6397 Assert(iReg < 16);
6398 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6399 }
6400 /* high 8-bit register. */
6401 Assert(iReg < 8);
6402 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6403}
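
#if 0 /* Editor's illustrative sketch, not part of the file: the legacy 8-bit
       * register encoding the function above deals with.  Without a REX
       * prefix, register numbers 4 thru 7 select AH/CH/DH/BH, i.e. bits 15:8
       * of rAX/rCX/rDX/rBX; with any REX prefix they select SPL/BPL/SIL/DIL
       * and the first branch is taken instead. */
static uint8_t iemSketchFetchAhWithoutRex(PVMCPU pVCpu)
{
    /* iReg == 4, no REX: high byte of GPR (4 & 3) == 0, i.e. AH. */
    return pVCpu->cpum.GstCtx.aGRegs[4 & 3].bHi;
}
#endif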
6404
6405
6406/**
6407 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6408 *
6409 * @returns Register reference.
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 * @param iReg The register.
6412 */
6413DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6414{
6415 Assert(iReg < 16);
6416 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6417}
6418
6419
6420/**
6421 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6422 *
6423 * @returns Register reference.
6424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6425 * @param iReg The register.
6426 */
6427DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6428{
6429 Assert(iReg < 16);
6430 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6431}
6432
6433
6434/**
6435 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6436 *
6437 * @returns Register reference.
6438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6439 * @param iReg The register.
6440 */
6441DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6442{
6443    Assert(iReg < 16);
6444 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6445}
6446
6447
6448/**
6449 * Gets a reference (pointer) to the specified segment register's base address.
6450 *
6451 * @returns Segment register base address reference.
6452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6453 * @param iSegReg The segment selector.
6454 */
6455DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6456{
6457 Assert(iSegReg < X86_SREG_COUNT);
6458 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6459 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6460}
6461
6462
6463/**
6464 * Fetches the value of an 8-bit general purpose register.
6465 *
6466 * @returns The register value.
6467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6468 * @param iReg The register.
6469 */
6470DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6471{
6472 return *iemGRegRefU8(pVCpu, iReg);
6473}
6474
6475
6476/**
6477 * Fetches the value of a 16-bit general purpose register.
6478 *
6479 * @returns The register value.
6480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6481 * @param iReg The register.
6482 */
6483DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6484{
6485 Assert(iReg < 16);
6486 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6487}
6488
6489
6490/**
6491 * Fetches the value of a 32-bit general purpose register.
6492 *
6493 * @returns The register value.
6494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6495 * @param iReg The register.
6496 */
6497DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6498{
6499 Assert(iReg < 16);
6500 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6501}
6502
6503
6504/**
6505 * Fetches the value of a 64-bit general purpose register.
6506 *
6507 * @returns The register value.
6508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6509 * @param iReg The register.
6510 */
6511DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6512{
6513 Assert(iReg < 16);
6514 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6515}
6516
6517
6518/**
6519 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6520 *
6521 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6522 * segment limit.
6523 *
6524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6525 * @param offNextInstr The offset of the next instruction.
6526 */
6527IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6528{
6529 switch (pVCpu->iem.s.enmEffOpSize)
6530 {
6531 case IEMMODE_16BIT:
6532 {
6533 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6534 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6535 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6536 return iemRaiseGeneralProtectionFault0(pVCpu);
6537 pVCpu->cpum.GstCtx.rip = uNewIp;
6538 break;
6539 }
6540
6541 case IEMMODE_32BIT:
6542 {
6543 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6544 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6545
6546 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6547 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6548 return iemRaiseGeneralProtectionFault0(pVCpu);
6549 pVCpu->cpum.GstCtx.rip = uNewEip;
6550 break;
6551 }
6552
6553 case IEMMODE_64BIT:
6554 {
6555 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6556
6557 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6558 if (!IEM_IS_CANONICAL(uNewRip))
6559 return iemRaiseGeneralProtectionFault0(pVCpu);
6560 pVCpu->cpum.GstCtx.rip = uNewRip;
6561 break;
6562 }
6563
6564 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6565 }
6566
6567 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6568
6569#ifndef IEM_WITH_CODE_TLB
6570 /* Flush the prefetch buffer. */
6571 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6572#endif
6573
6574 return VINF_SUCCESS;
6575}
6576
6577
6578/**
6579 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6580 *
6581 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6582 * segment limit.
6583 *
6584 * @returns Strict VBox status code.
6585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6586 * @param offNextInstr The offset of the next instruction.
6587 */
6588IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6589{
6590 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6591
6592 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6593 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6594 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6595 return iemRaiseGeneralProtectionFault0(pVCpu);
6596 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6597 pVCpu->cpum.GstCtx.rip = uNewIp;
6598 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6599
6600#ifndef IEM_WITH_CODE_TLB
6601 /* Flush the prefetch buffer. */
6602 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6603#endif
6604
6605 return VINF_SUCCESS;
6606}
6607
6608
6609/**
6610 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6611 *
6612 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6613 * segment limit.
6614 *
6615 * @returns Strict VBox status code.
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 * @param offNextInstr The offset of the next instruction.
6618 */
6619IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6620{
6621 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6622
6623 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6624 {
6625 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6626
6627 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6628 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6629 return iemRaiseGeneralProtectionFault0(pVCpu);
6630 pVCpu->cpum.GstCtx.rip = uNewEip;
6631 }
6632 else
6633 {
6634 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6635
6636 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6637 if (!IEM_IS_CANONICAL(uNewRip))
6638 return iemRaiseGeneralProtectionFault0(pVCpu);
6639 pVCpu->cpum.GstCtx.rip = uNewRip;
6640 }
6641 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6642
6643#ifndef IEM_WITH_CODE_TLB
6644 /* Flush the prefetch buffer. */
6645 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6646#endif
6647
6648 return VINF_SUCCESS;
6649}
6650
6651
6652/**
6653 * Performs a near jump to the specified address.
6654 *
6655 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6656 * segment limit.
6657 *
6658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6659 * @param uNewRip The new RIP value.
6660 */
6661IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6662{
6663 switch (pVCpu->iem.s.enmEffOpSize)
6664 {
6665 case IEMMODE_16BIT:
6666 {
6667 Assert(uNewRip <= UINT16_MAX);
6668 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6669 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6670 return iemRaiseGeneralProtectionFault0(pVCpu);
6671 /** @todo Test 16-bit jump in 64-bit mode. */
6672 pVCpu->cpum.GstCtx.rip = uNewRip;
6673 break;
6674 }
6675
6676 case IEMMODE_32BIT:
6677 {
6678 Assert(uNewRip <= UINT32_MAX);
6679 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6680 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6681
6682 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6683 return iemRaiseGeneralProtectionFault0(pVCpu);
6684 pVCpu->cpum.GstCtx.rip = uNewRip;
6685 break;
6686 }
6687
6688 case IEMMODE_64BIT:
6689 {
6690 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6691
6692 if (!IEM_IS_CANONICAL(uNewRip))
6693 return iemRaiseGeneralProtectionFault0(pVCpu);
6694 pVCpu->cpum.GstCtx.rip = uNewRip;
6695 break;
6696 }
6697
6698 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6699 }
6700
6701 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6702
6703#ifndef IEM_WITH_CODE_TLB
6704 /* Flush the prefetch buffer. */
6705 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6706#endif
6707
6708 return VINF_SUCCESS;
6709}
6710
6711
6712/**
6713 * Get the address of the top of the stack.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 */
6717DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6718{
6719 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6720 return pVCpu->cpum.GstCtx.rsp;
6721 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6722 return pVCpu->cpum.GstCtx.esp;
6723 return pVCpu->cpum.GstCtx.sp;
6724}
6725
6726
6727/**
6728 * Updates the RIP/EIP/IP to point to the next instruction.
6729 *
6730 * This function leaves the EFLAGS.RF flag alone.
6731 *
6732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6733 * @param cbInstr The number of bytes to add.
6734 */
6735IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6736{
6737 switch (pVCpu->iem.s.enmCpuMode)
6738 {
6739 case IEMMODE_16BIT:
6740 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6741 pVCpu->cpum.GstCtx.eip += cbInstr;
6742 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6743 break;
6744
6745 case IEMMODE_32BIT:
6746 pVCpu->cpum.GstCtx.eip += cbInstr;
6747 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6748 break;
6749
6750 case IEMMODE_64BIT:
6751 pVCpu->cpum.GstCtx.rip += cbInstr;
6752 break;
6753 default: AssertFailed();
6754 }
6755}
6756
6757
6758#if 0
6759/**
6760 * Updates the RIP/EIP/IP to point to the next instruction.
6761 *
6762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6763 */
6764IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6765{
6766 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6767}
6768#endif
6769
6770
6771
6772/**
6773 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6774 *
6775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6776 * @param cbInstr The number of bytes to add.
6777 */
6778IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6779{
6780 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6781
6782 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6783#if ARCH_BITS >= 64
6784 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6785 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6786 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6787#else
6788 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6789 pVCpu->cpum.GstCtx.rip += cbInstr;
6790 else
6791 pVCpu->cpum.GstCtx.eip += cbInstr;
6792#endif
6793}
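
#if 0 /* Editor's illustrative sketch, not part of the file: the branch-free
       * update above relies on IEMMODE_16BIT/32BIT/64BIT being 0/1/2 (see the
       * AssertCompile) so a small table can supply the per-mode wrap-around
       * mask; note that the 16-bit entry deliberately reuses the 32-bit mask. */
static uint64_t iemSketchAdvanceRip(uint64_t uRip, uint8_t cbInstr, IEMMODE enmCpuMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[(unsigned)enmCpuMode];
}
#endif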
6794
6795
6796/**
6797 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6798 *
6799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6800 */
6801IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6802{
6803 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6804}
6805
6806
6807/**
6808 * Adds to the stack pointer.
6809 *
6810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6811 * @param cbToAdd The number of bytes to add (8-bit!).
6812 */
6813DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6814{
6815 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6816 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6817 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6818 pVCpu->cpum.GstCtx.esp += cbToAdd;
6819 else
6820 pVCpu->cpum.GstCtx.sp += cbToAdd;
6821}
6822
6823
6824/**
6825 * Subtracts from the stack pointer.
6826 *
6827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6828 * @param cbToSub The number of bytes to subtract (8-bit!).
6829 */
6830DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6831{
6832 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6833 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6834 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6835 pVCpu->cpum.GstCtx.esp -= cbToSub;
6836 else
6837 pVCpu->cpum.GstCtx.sp -= cbToSub;
6838}
6839
6840
6841/**
6842 * Adds to the temporary stack pointer.
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6846 * @param cbToAdd The number of bytes to add (16-bit).
6847 */
6848DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6849{
6850 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6851 pTmpRsp->u += cbToAdd;
6852 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6853 pTmpRsp->DWords.dw0 += cbToAdd;
6854 else
6855 pTmpRsp->Words.w0 += cbToAdd;
6856}
6857
6858
6859/**
6860 * Subtracts from the temporary stack pointer.
6861 *
6862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6863 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6864 * @param cbToSub The number of bytes to subtract.
6865 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6866 * expecting that.
6867 */
6868DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6869{
6870 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6871 pTmpRsp->u -= cbToSub;
6872 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6873 pTmpRsp->DWords.dw0 -= cbToSub;
6874 else
6875 pTmpRsp->Words.w0 -= cbToSub;
6876}
6877
6878
6879/**
6880 * Calculates the effective stack address for a push of the specified size as
6881 * well as the new RSP value (upper bits may be masked).
6882 *
6883 * @returns Effective stack address for the push.
6884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6885 * @param cbItem The size of the stack item to push.
6886 * @param puNewRsp Where to return the new RSP value.
6887 */
6888DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6889{
6890 RTUINT64U uTmpRsp;
6891 RTGCPTR GCPtrTop;
6892 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6893
6894 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6895 GCPtrTop = uTmpRsp.u -= cbItem;
6896 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6897 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6898 else
6899 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6900 *puNewRsp = uTmpRsp.u;
6901 return GCPtrTop;
6902}
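
#if 0 /* Editor's illustrative sketch, not part of the file: what the helper
       * above yields for a 4-byte push in 32-bit code (SS.B=1).  Only the low
       * 32 bits of RSP are decremented and used as the effective address; the
       * caller typically writes *puNewRsp back to RSP once the store succeeds.
       * The ESP value below is made up for the example. */
static void iemSketchRspForDwordPush(void)
{
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x1000);                   /* hypothetical guest ESP */
    RTGCPTR GCPtrTop = uTmpRsp.DWords.dw0 -= 4;
    /* GCPtrTop == 0xffc and uTmpRsp.u == 0xffc, the value handed back via puNewRsp. */
    NOREF(GCPtrTop);
}
#endif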
6903
6904
6905/**
6906 * Gets the current stack pointer and calculates the value after a pop of the
6907 * specified size.
6908 *
6909 * @returns Current stack pointer.
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 * @param cbItem The size of the stack item to pop.
6912 * @param puNewRsp Where to return the new RSP value.
6913 */
6914DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6915{
6916 RTUINT64U uTmpRsp;
6917 RTGCPTR GCPtrTop;
6918 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6919
6920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6921 {
6922 GCPtrTop = uTmpRsp.u;
6923 uTmpRsp.u += cbItem;
6924 }
6925 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6926 {
6927 GCPtrTop = uTmpRsp.DWords.dw0;
6928 uTmpRsp.DWords.dw0 += cbItem;
6929 }
6930 else
6931 {
6932 GCPtrTop = uTmpRsp.Words.w0;
6933 uTmpRsp.Words.w0 += cbItem;
6934 }
6935 *puNewRsp = uTmpRsp.u;
6936 return GCPtrTop;
6937}
6938
6939
6940/**
6941 * Calculates the effective stack address for a push of the specified size as
6942 * well as the new temporary RSP value (upper bits may be masked).
6943 *
6944 * @returns Effective stack address for the push.
6945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6946 * @param pTmpRsp The temporary stack pointer. This is updated.
6947 * @param cbItem The size of the stack item to push.
6948 */
6949DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6950{
6951 RTGCPTR GCPtrTop;
6952
6953 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6954 GCPtrTop = pTmpRsp->u -= cbItem;
6955 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6956 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6957 else
6958 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6959 return GCPtrTop;
6960}
6961
6962
6963/**
6964 * Gets the effective stack address for a pop of the specified size and
6965 * calculates and updates the temporary RSP.
6966 *
6967 * @returns Current stack pointer.
6968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6969 * @param pTmpRsp The temporary stack pointer. This is updated.
6970 * @param cbItem The size of the stack item to pop.
6971 */
6972DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6973{
6974 RTGCPTR GCPtrTop;
6975 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6976 {
6977 GCPtrTop = pTmpRsp->u;
6978 pTmpRsp->u += cbItem;
6979 }
6980 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6981 {
6982 GCPtrTop = pTmpRsp->DWords.dw0;
6983 pTmpRsp->DWords.dw0 += cbItem;
6984 }
6985 else
6986 {
6987 GCPtrTop = pTmpRsp->Words.w0;
6988 pTmpRsp->Words.w0 += cbItem;
6989 }
6990 return GCPtrTop;
6991}
6992
6993/** @} */
6994
6995
6996/** @name FPU access and helpers.
6997 *
6998 * @{
6999 */
7000
7001
7002/**
7003 * Hook for preparing to use the host FPU.
7004 *
7005 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7006 *
7007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7008 */
7009DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7010{
7011#ifdef IN_RING3
7012 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7013#else
7014 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7015#endif
7016 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7017}
7018
7019
7020/**
7021 * Hook for preparing to use the host FPU for SSE.
7022 *
7023 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 */
7027DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7028{
7029 iemFpuPrepareUsage(pVCpu);
7030}
7031
7032
7033/**
7034 * Hook for preparing to use the host FPU for AVX.
7035 *
7036 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7037 *
7038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7039 */
7040DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7041{
7042 iemFpuPrepareUsage(pVCpu);
7043}
7044
7045
7046/**
7047 * Hook for actualizing the guest FPU state before the interpreter reads it.
7048 *
7049 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7050 *
7051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7052 */
7053DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7054{
7055#ifdef IN_RING3
7056 NOREF(pVCpu);
7057#else
7058 CPUMRZFpuStateActualizeForRead(pVCpu);
7059#endif
7060 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7061}
7062
7063
7064/**
7065 * Hook for actualizing the guest FPU state before the interpreter changes it.
7066 *
7067 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7068 *
7069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7070 */
7071DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7072{
7073#ifdef IN_RING3
7074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7075#else
7076 CPUMRZFpuStateActualizeForChange(pVCpu);
7077#endif
7078 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7079}
7080
7081
7082/**
7083 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7084 * only.
7085 *
7086 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7087 *
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 */
7090DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7091{
7092#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7093 NOREF(pVCpu);
7094#else
7095 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7096#endif
7097 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7098}
7099
7100
7101/**
7102 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7103 * read+write.
7104 *
7105 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7106 *
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 */
7109DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7110{
7111#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7112 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7113#else
7114 CPUMRZFpuStateActualizeForChange(pVCpu);
7115#endif
7116 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7117}
7118
7119
7120/**
7121 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7122 * only.
7123 *
7124 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7125 *
7126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7127 */
7128DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7129{
7130#ifdef IN_RING3
7131 NOREF(pVCpu);
7132#else
7133 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7134#endif
7135 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7136}
7137
7138
7139/**
7140 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7141 * read+write.
7142 *
7143 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7144 *
7145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7146 */
7147DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7148{
7149#ifdef IN_RING3
7150 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7151#else
7152 CPUMRZFpuStateActualizeForChange(pVCpu);
7153#endif
7154 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7155}
7156
7157
7158/**
7159 * Stores a QNaN value into a FPU register.
7160 *
7161 * @param pReg Pointer to the register.
7162 */
7163DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7164{
7165 pReg->au32[0] = UINT32_C(0x00000000);
7166 pReg->au32[1] = UINT32_C(0xc0000000);
7167 pReg->au16[4] = UINT16_C(0xffff);
7168}
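
#if 0 /* Editor's illustrative sketch, not part of the file: the pattern written
       * above is the x87 "real indefinite" QNaN, i.e. the 80-bit value
       * ffff:c000000000000000 (sign=1, exponent=0x7fff, integer bit and top
       * fraction bit set, remaining fraction bits zero). */
static bool iemSketchIsRealIndefinite(RTFLOAT80U const *pReg)
{
    return pReg->au16[4] == UINT16_C(0xffff)        /* sign + exponent */
        && pReg->au32[1] == UINT32_C(0xc0000000)    /* integer bit + quiet bit */
        && pReg->au32[0] == UINT32_C(0x00000000);   /* low fraction bits */
}
#endif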
7169
7170
7171/**
7172 * Updates the FOP, FPU.CS and FPUIP registers.
7173 *
7174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7175 * @param pFpuCtx The FPU context.
7176 */
7177DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7178{
7179 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7180 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7181    /** @todo x87.CS and FPUIP need to be kept separately. */
7182 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7183 {
7184 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7185 * happens in real mode here based on the fnsave and fnstenv images. */
7186 pFpuCtx->CS = 0;
7187 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7188 }
7189 else
7190 {
7191 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7192 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7193 }
7194}
7195
7196
7197/**
7198 * Updates the x87.DS and FPUDP registers.
7199 *
7200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7201 * @param pFpuCtx The FPU context.
7202 * @param iEffSeg The effective segment register.
7203 * @param GCPtrEff The effective address relative to @a iEffSeg.
7204 */
7205DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7206{
7207 RTSEL sel;
7208 switch (iEffSeg)
7209 {
7210 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7211 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7212 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7213 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7214 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7215 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7216 default:
7217 AssertMsgFailed(("%d\n", iEffSeg));
7218 sel = pVCpu->cpum.GstCtx.ds.Sel;
7219 }
7220    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7221 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7222 {
7223 pFpuCtx->DS = 0;
7224 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7225 }
7226 else
7227 {
7228 pFpuCtx->DS = sel;
7229 pFpuCtx->FPUDP = GCPtrEff;
7230 }
7231}
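
#if 0 /* Editor's illustrative sketch, not part of the file: in real and V86
       * mode the function above stores FPUDP as a linear address, i.e. the
       * usual segment*16 + offset calculation.  The selector and offset below
       * are made-up example values. */
static uint32_t iemSketchRealModeFpuDp(void)
{
    uint16_t const uSel   = 0x1234;                 /* hypothetical DS value */
    uint32_t const offEff = 0x0010;                 /* hypothetical effective address */
    return offEff + ((uint32_t)uSel << 4);          /* 0x00012350 */
}
#endif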
7232
7233
7234/**
7235 * Rotates the stack registers in the push direction.
7236 *
7237 * @param pFpuCtx The FPU context.
7238 * @remarks This is a complete waste of time, but fxsave stores the registers in
7239 * stack order.
7240 */
7241DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7242{
7243 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7244 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7245 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7246 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7247 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7248 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7249 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7250 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7251 pFpuCtx->aRegs[0].r80 = r80Tmp;
7252}
7253
7254
7255/**
7256 * Rotates the stack registers in the pop direction.
7257 *
7258 * @param pFpuCtx The FPU context.
7259 * @remarks This is a complete waste of time, but fxsave stores the registers in
7260 * stack order.
7261 */
7262DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7263{
7264 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7265 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7266 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7267 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7268 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7269 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7270 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7271 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7272 pFpuCtx->aRegs[7].r80 = r80Tmp;
7273}
7274
7275
7276/**
7277 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7278 * exception prevents it.
7279 *
7280 * @param pResult The FPU operation result to push.
7281 * @param pFpuCtx The FPU context.
7282 */
7283IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7284{
7285 /* Update FSW and bail if there are pending exceptions afterwards. */
7286 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7287 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7288 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7289 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7290 {
7291 pFpuCtx->FSW = fFsw;
7292 return;
7293 }
7294
7295 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7296 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7297 {
7298 /* All is fine, push the actual value. */
7299 pFpuCtx->FTW |= RT_BIT(iNewTop);
7300 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7301 }
7302 else if (pFpuCtx->FCW & X86_FCW_IM)
7303 {
7304 /* Masked stack overflow, push QNaN. */
7305 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7306 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7307 }
7308 else
7309 {
7310 /* Raise stack overflow, don't push anything. */
7311 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7312 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7313 return;
7314 }
7315
7316 fFsw &= ~X86_FSW_TOP_MASK;
7317 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7318 pFpuCtx->FSW = fFsw;
7319
7320 iemFpuRotateStackPush(pFpuCtx);
7321}
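
#if 0 /* Editor's illustrative sketch, not part of the file: the "+ 7" above is
       * a decrement modulo 8, since TOP is a 3-bit field and an x87 push moves
       * TOP down by one (TOP=0 wraps around to 7). */
static uint16_t iemSketchTopAfterPush(uint16_t fFsw)
{
    return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* (TOP - 1) & 7 */
}
#endif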
7322
7323
7324/**
7325 * Stores a result in a FPU register and updates the FSW and FTW.
7326 *
7327 * @param pFpuCtx The FPU context.
7328 * @param pResult The result to store.
7329 * @param iStReg Which FPU register to store it in.
7330 */
7331IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7332{
7333 Assert(iStReg < 8);
7334 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7335 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7336 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7337 pFpuCtx->FTW |= RT_BIT(iReg);
7338 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7339}
7340
7341
7342/**
7343 * Only updates the FPU status word (FSW) with the result of the current
7344 * instruction.
7345 *
7346 * @param pFpuCtx The FPU context.
7347 * @param u16FSW The FSW output of the current instruction.
7348 */
7349IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7350{
7351 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7352 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7353}
7354
7355
7356/**
7357 * Pops one item off the FPU stack if no pending exception prevents it.
7358 *
7359 * @param pFpuCtx The FPU context.
7360 */
7361IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7362{
7363 /* Check pending exceptions. */
7364 uint16_t uFSW = pFpuCtx->FSW;
7365 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7366 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7367 return;
7368
7369    /* TOP++. */
7370 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7371 uFSW &= ~X86_FSW_TOP_MASK;
7372 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7373 pFpuCtx->FSW = uFSW;
7374
7375 /* Mark the previous ST0 as empty. */
7376 iOldTop >>= X86_FSW_TOP_SHIFT;
7377 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7378
7379 /* Rotate the registers. */
7380 iemFpuRotateStackPop(pFpuCtx);
7381}
7382
7383
7384/**
7385 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7386 *
7387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7388 * @param pResult The FPU operation result to push.
7389 */
7390IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7391{
7392 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7393 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7394 iemFpuMaybePushResult(pResult, pFpuCtx);
7395}
7396
7397
7398/**
7399 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7400 * and sets FPUDP and FPUDS.
7401 *
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 * @param pResult The FPU operation result to push.
7404 * @param iEffSeg The effective segment register.
7405 * @param GCPtrEff The effective address relative to @a iEffSeg.
7406 */
7407IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7408{
7409 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7410 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7411 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7412 iemFpuMaybePushResult(pResult, pFpuCtx);
7413}
7414
7415
7416/**
7417 * Replace ST0 with the first value and push the second onto the FPU stack,
7418 * unless a pending exception prevents it.
7419 *
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 * @param pResult The FPU operation result to store and push.
7422 */
7423IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7424{
7425 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7426 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7427
7428 /* Update FSW and bail if there are pending exceptions afterwards. */
7429 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7430 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7431 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7432 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7433 {
7434 pFpuCtx->FSW = fFsw;
7435 return;
7436 }
7437
7438 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7439 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7440 {
7441 /* All is fine, push the actual value. */
7442 pFpuCtx->FTW |= RT_BIT(iNewTop);
7443 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7444 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7445 }
7446 else if (pFpuCtx->FCW & X86_FCW_IM)
7447 {
7448 /* Masked stack overflow, push QNaN. */
7449 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7450 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7451 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7452 }
7453 else
7454 {
7455 /* Raise stack overflow, don't push anything. */
7456 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7457 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7458 return;
7459 }
7460
7461 fFsw &= ~X86_FSW_TOP_MASK;
7462 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7463 pFpuCtx->FSW = fFsw;
7464
7465 iemFpuRotateStackPush(pFpuCtx);
7466}
7467
7468
7469/**
7470 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7471 * FOP.
7472 *
7473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7474 * @param pResult The result to store.
7475 * @param iStReg Which FPU register to store it in.
7476 */
7477IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7478{
7479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7480 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7481 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7482}
7483
7484
7485/**
7486 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7487 * FOP, and then pops the stack.
7488 *
7489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7490 * @param pResult The result to store.
7491 * @param iStReg Which FPU register to store it in.
7492 */
7493IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7494{
7495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7496 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7497 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7498 iemFpuMaybePopOne(pFpuCtx);
7499}
7500
7501
7502/**
7503 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7504 * FPUDP, and FPUDS.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param pResult The result to store.
7508 * @param iStReg Which FPU register to store it in.
7509 * @param iEffSeg The effective memory operand selector register.
7510 * @param GCPtrEff The effective memory operand offset.
7511 */
7512IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7513 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7514{
7515 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7516 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7517 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7518 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7519}
7520
7521
7522/**
7523 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7524 * FPUDP, and FPUDS, and then pops the stack.
7525 *
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 * @param pResult The result to store.
7528 * @param iStReg Which FPU register to store it in.
7529 * @param iEffSeg The effective memory operand selector register.
7530 * @param GCPtrEff The effective memory operand offset.
7531 */
7532IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7533 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7534{
7535 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7536 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7537 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7538 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7539 iemFpuMaybePopOne(pFpuCtx);
7540}
7541
7542
7543/**
7544 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7545 *
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 */
7548IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7549{
7550 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7551 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7552}
7553
7554
7555/**
7556 * Marks the specified stack register as free (for FFREE).
7557 *
7558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7559 * @param iStReg The register to free.
7560 */
7561IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7562{
7563 Assert(iStReg < 8);
7564 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7565 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7566 pFpuCtx->FTW &= ~RT_BIT(iReg);
7567}
7568
7569
7570/**
7571 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7572 *
7573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7574 */
7575IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7576{
7577 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7578 uint16_t uFsw = pFpuCtx->FSW;
7579 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7580 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7581 uFsw &= ~X86_FSW_TOP_MASK;
7582 uFsw |= uTop;
7583 pFpuCtx->FSW = uFsw;
7584}
7585
7586
7587/**
7588 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7589 *
7590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7591 */
7592IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7593{
7594 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7595 uint16_t uFsw = pFpuCtx->FSW;
7596 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7597 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7598 uFsw &= ~X86_FSW_TOP_MASK;
7599 uFsw |= uTop;
7600 pFpuCtx->FSW = uFsw;
7601}
7602
7603
7604/**
7605 * Updates the FSW, FOP, FPUIP, and FPUCS.
7606 *
7607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7608 * @param u16FSW The FSW from the current instruction.
7609 */
7610IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7611{
7612 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7613 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7614 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7615}
7616
7617
7618/**
7619 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7620 *
7621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7622 * @param u16FSW The FSW from the current instruction.
7623 */
7624IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7625{
7626 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7628 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7629 iemFpuMaybePopOne(pFpuCtx);
7630}
7631
7632
7633/**
7634 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7635 *
7636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7637 * @param u16FSW The FSW from the current instruction.
7638 * @param iEffSeg The effective memory operand selector register.
7639 * @param GCPtrEff The effective memory operand offset.
7640 */
7641IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7642{
7643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7644 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7647}
7648
7649
7650/**
7651 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7652 *
7653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7654 * @param u16FSW The FSW from the current instruction.
7655 */
7656IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7657{
7658 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7659 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7660 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7661 iemFpuMaybePopOne(pFpuCtx);
7662 iemFpuMaybePopOne(pFpuCtx);
7663}
7664
7665
7666/**
7667 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7668 *
7669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7670 * @param u16FSW The FSW from the current instruction.
7671 * @param iEffSeg The effective memory operand selector register.
7672 * @param GCPtrEff The effective memory operand offset.
7673 */
7674IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7675{
7676 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7677 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7678 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7679 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7680 iemFpuMaybePopOne(pFpuCtx);
7681}
7682
7683
7684/**
7685 * Worker routine for raising an FPU stack underflow exception.
7686 *
7687 * @param pFpuCtx The FPU context.
7688 * @param iStReg The stack register being accessed.
7689 */
7690IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7691{
7692 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7693 if (pFpuCtx->FCW & X86_FCW_IM)
7694 {
7695 /* Masked underflow. */
7696 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7698 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7699 if (iStReg != UINT8_MAX)
7700 {
7701 pFpuCtx->FTW |= RT_BIT(iReg);
7702 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7703 }
7704 }
7705 else
7706 {
7707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7708 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7709 }
7710}
7711
7712
7713/**
7714 * Raises a FPU stack underflow exception.
7715 *
7716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7717 * @param iStReg The destination register that should be loaded
7718 * with QNaN if \#IS is not masked. Specify
7719 * UINT8_MAX if none (like for fcom).
7720 */
7721DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7722{
7723 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7724 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7725 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7726}
7727
7728
7729DECL_NO_INLINE(IEM_STATIC, void)
7730iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7731{
7732 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7733 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7734 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7735 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7736}
7737
7738
7739DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7740{
7741 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7742 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7743 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7744 iemFpuMaybePopOne(pFpuCtx);
7745}
7746
7747
7748DECL_NO_INLINE(IEM_STATIC, void)
7749iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7750{
7751 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7752 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7753 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7754 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7755 iemFpuMaybePopOne(pFpuCtx);
7756}
7757
7758
7759DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7760{
7761 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7762 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7763 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7764 iemFpuMaybePopOne(pFpuCtx);
7765 iemFpuMaybePopOne(pFpuCtx);
7766}
7767
7768
7769DECL_NO_INLINE(IEM_STATIC, void)
7770iemFpuStackPushUnderflow(PVMCPU pVCpu)
7771{
7772 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7773 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7774
7775 if (pFpuCtx->FCW & X86_FCW_IM)
7776 {
7777 /* Masked underflow - Push QNaN. */
7778 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7779 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7780 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7781 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7782 pFpuCtx->FTW |= RT_BIT(iNewTop);
7783 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7784 iemFpuRotateStackPush(pFpuCtx);
7785 }
7786 else
7787 {
7788 /* Exception pending - don't change TOP or the register stack. */
7789 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7790 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7791 }
7792}
7793
7794
7795DECL_NO_INLINE(IEM_STATIC, void)
7796iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7797{
7798 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7799 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7800
7801 if (pFpuCtx->FCW & X86_FCW_IM)
7802 {
7803 /* Masked underflow - Push QNaN. */
7804 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7805 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7806 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7807 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7808 pFpuCtx->FTW |= RT_BIT(iNewTop);
7809 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7810 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7811 iemFpuRotateStackPush(pFpuCtx);
7812 }
7813 else
7814 {
7815 /* Exception pending - don't change TOP or the register stack. */
7816 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7817 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7818 }
7819}
7820
7821
7822/**
7823 * Worker routine for raising an FPU stack overflow exception on a push.
7824 *
7825 * @param pFpuCtx The FPU context.
7826 */
7827IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7828{
7829 if (pFpuCtx->FCW & X86_FCW_IM)
7830 {
7831 /* Masked overflow. */
7832 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7833 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7834 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7835 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7836 pFpuCtx->FTW |= RT_BIT(iNewTop);
7837 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7838 iemFpuRotateStackPush(pFpuCtx);
7839 }
7840 else
7841 {
7842 /* Exception pending - don't change TOP or the register stack. */
7843 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7844 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7845 }
7846}
7847
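/*
 * A minimal sketch of the TOP handling shared by the push underflow and push
 * overflow workers above: on a masked fault TOP is decremented modulo 8 (by
 * adding 7), the new top slot is marked used and receives a QNaN.  The helper
 * below only shows the FSW arithmetic; its name is illustrative.
 */
DECLINLINE(uint16_t) iemExampleFswDecTop(uint16_t u16Fsw)
{
    uint16_t const iNewTop = (uint16_t)((((u16Fsw >> 11) & 7) + 7) & 7);  /* (TOP - 1) mod 8 */
    return (uint16_t)((u16Fsw & ~(7 << 11)) | (iNewTop << 11));           /* replace the TOP field */
}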
7848
7849/**
7850 * Raises a FPU stack overflow exception on a push.
7851 *
7852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7853 */
7854DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7855{
7856 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7857 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7858 iemFpuStackPushOverflowOnly(pFpuCtx);
7859}
7860
7861
7862/**
7863 * Raises a FPU stack overflow exception on a push with a memory operand.
7864 *
7865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7866 * @param iEffSeg The effective memory operand selector register.
7867 * @param GCPtrEff The effective memory operand offset.
7868 */
7869DECL_NO_INLINE(IEM_STATIC, void)
7870iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7871{
7872 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7873 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7874 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7875 iemFpuStackPushOverflowOnly(pFpuCtx);
7876}
7877
7878
7879IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7880{
7881 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7882 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7883 if (pFpuCtx->FTW & RT_BIT(iReg))
7884 return VINF_SUCCESS;
7885 return VERR_NOT_FOUND;
7886}
7887
7888
7889IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7890{
7891 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7892 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7893 if (pFpuCtx->FTW & RT_BIT(iReg))
7894 {
7895 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7896 return VINF_SUCCESS;
7897 }
7898 return VERR_NOT_FOUND;
7899}
7900
7901
7902IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7903 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7904{
7905 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7906 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7907 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7908 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7909 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7910 {
7911 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7912 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7913 return VINF_SUCCESS;
7914 }
7915 return VERR_NOT_FOUND;
7916}
7917
7918
7919IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7920{
7921 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7922 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7923 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7924 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7925 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7926 {
7927 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7928 return VINF_SUCCESS;
7929 }
7930 return VERR_NOT_FOUND;
7931}
7932
7933
7934/**
7935 * Updates the FPU exception status after FCW is changed.
7936 *
7937 * @param pFpuCtx The FPU context.
7938 */
7939IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7940{
7941 uint16_t u16Fsw = pFpuCtx->FSW;
7942 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7943 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7944 else
7945 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7946 pFpuCtx->FSW = u16Fsw;
7947}
7948
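/*
 * A compact sketch of the rule applied above: the ES (error summary) and B
 * (busy) bits must be set exactly when at least one exception flag in FSW is
 * not masked by the corresponding FCW bit.  The 0x3f constant below covers
 * the six maskable exceptions (IE..PE) and is used here for illustration
 * instead of the IEM mask macros.
 */
DECLINLINE(bool) iemExampleHasUnmaskedXcpt(uint16_t u16Fsw, uint16_t u16Fcw)
{
    return ((u16Fsw & 0x3f) & ~(u16Fcw & 0x3f)) != 0;  /* e.g. FSW.ZE=1 with FCW.ZM=0 -> true */
}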
7949
7950/**
7951 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7952 *
7953 * @returns The full FTW.
7954 * @param pFpuCtx The FPU context.
7955 */
7956IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7957{
7958 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7959 uint16_t u16Ftw = 0;
7960 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7961 for (unsigned iSt = 0; iSt < 8; iSt++)
7962 {
7963 unsigned const iReg = (iSt + iTop) & 7;
7964 if (!(u8Ftw & RT_BIT(iReg)))
7965 u16Ftw |= 3 << (iReg * 2); /* empty */
7966 else
7967 {
7968 uint16_t uTag;
7969 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7970 if (pr80Reg->s.uExponent == 0x7fff)
7971 uTag = 2; /* Exponent is all 1's => Special. */
7972 else if (pr80Reg->s.uExponent == 0x0000)
7973 {
7974 if (pr80Reg->s.u64Mantissa == 0x0000)
7975 uTag = 1; /* All bits are zero => Zero. */
7976 else
7977 uTag = 2; /* Must be special. */
7978 }
7979 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7980 uTag = 0; /* Valid. */
7981 else
7982 uTag = 2; /* Must be special. */
7983
7984 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7985 }
7986 }
7987
7988 return u16Ftw;
7989}
7990
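/*
 * A standalone sketch of the per-register classification done above, taking
 * the raw exponent and mantissa of an 80-bit register and returning the
 * architectural 2-bit tag: 0 = valid, 1 = zero, 2 = special (3 = empty is
 * decided from the compressed FTW instead).  Name and signature are
 * illustrative only.
 */
DECLINLINE(unsigned) iemExampleCalcR80Tag(uint16_t uExponent, uint64_t u64Mantissa)
{
    if (uExponent == 0x7fff)
        return 2;                                   /* NaNs, infinities and other specials. */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;            /* zero vs. (pseudo-)denormal. */
    return u64Mantissa & RT_BIT_64(63) ? 0 : 2;     /* J bit set -> valid, clear -> unnormal. */
}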
7991
7992/**
7993 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7994 *
7995 * @returns The compressed FTW.
7996 * @param u16FullFtw The full FTW to convert.
7997 */
7998IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7999{
8000 uint8_t u8Ftw = 0;
8001 for (unsigned i = 0; i < 8; i++)
8002 {
8003 if ((u16FullFtw & 3) != 3 /*empty*/)
8004 u8Ftw |= RT_BIT(i);
8005 u16FullFtw >>= 2;
8006 }
8007
8008 return u8Ftw;
8009}
8010
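/*
 * A worked example for the compression above, assuming the architectural
 * encoding where tag value 11b means empty: a full FTW of 0xff55 (slots 0..3
 * tagged zero, slots 4..7 empty) compresses to 0x0f.  The self-check helper
 * is purely illustrative.
 */
DECLINLINE(void) iemExampleFtwCompressCheck(void)
{
    Assert(iemFpuCompressFtw(UINT16_C(0xff55)) == 0x0f); /* only the four non-empty slots set a bit */
    Assert(iemFpuCompressFtw(UINT16_C(0xffff)) == 0x00); /* all slots empty */
}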
8011/** @} */
8012
8013
8014/** @name Memory access.
8015 *
8016 * @{
8017 */
8018
8019
8020/**
8021 * Updates the IEMCPU::cbWritten counter if applicable.
8022 *
8023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8024 * @param fAccess The access being accounted for.
8025 * @param cbMem The access size.
8026 */
8027DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8028{
8029 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8030 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8031 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8032}
8033
8034
8035/**
8036 * Checks if the given segment can be written to, raising the appropriate
8037 * exception if not.
8038 *
8039 * @returns VBox strict status code.
8040 *
8041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8042 * @param pHid Pointer to the hidden register.
8043 * @param iSegReg The register number.
8044 * @param pu64BaseAddr Where to return the base address to use for the
8045 * segment. (In 64-bit code it may differ from the
8046 * base in the hidden segment.)
8047 */
8048IEM_STATIC VBOXSTRICTRC
8049iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8050{
8051 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8052
8053 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8054 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8055 else
8056 {
8057 if (!pHid->Attr.n.u1Present)
8058 {
8059 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8060 AssertRelease(uSel == 0);
8061 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8062 return iemRaiseGeneralProtectionFault0(pVCpu);
8063 }
8064
8065 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8066 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8067 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8068 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8069 *pu64BaseAddr = pHid->u64Base;
8070 }
8071 return VINF_SUCCESS;
8072}
8073
8074
8075/**
8076 * Checks if the given segment can be read from, raising the appropriate
8077 * exception if not.
8078 *
8079 * @returns VBox strict status code.
8080 *
8081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8082 * @param pHid Pointer to the hidden register.
8083 * @param iSegReg The register number.
8084 * @param pu64BaseAddr Where to return the base address to use for the
8085 * segment. (In 64-bit code it may differ from the
8086 * base in the hidden segment.)
8087 */
8088IEM_STATIC VBOXSTRICTRC
8089iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8090{
8091 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8092
8093 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8094 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8095 else
8096 {
8097 if (!pHid->Attr.n.u1Present)
8098 {
8099 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8100 AssertRelease(uSel == 0);
8101 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8102 return iemRaiseGeneralProtectionFault0(pVCpu);
8103 }
8104
8105 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8106 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8107 *pu64BaseAddr = pHid->u64Base;
8108 }
8109 return VINF_SUCCESS;
8110}
8111
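/*
 * The protected-mode readability rule used just above, condensed into a
 * standalone predicate on the 4-bit segment type: data segments are always
 * readable, code segments only when their R bit is set.  The helper name is
 * illustrative.
 */
DECLINLINE(bool) iemExampleSegTypeIsReadable(uint8_t u4Type)
{
    return (u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) != X86_SEL_TYPE_CODE;
}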
8112
8113/**
8114 * Applies the segment limit, base and attributes.
8115 *
8116 * This may raise a \#GP or \#SS.
8117 *
8118 * @returns VBox strict status code.
8119 *
8120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8121 * @param fAccess The kind of access which is being performed.
8122 * @param iSegReg The index of the segment register to apply.
8123 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8124 * TSS, ++).
8125 * @param cbMem The access size.
8126 * @param pGCPtrMem Pointer to the guest memory address to apply
8127 * segmentation to. Input and output parameter.
8128 */
8129IEM_STATIC VBOXSTRICTRC
8130iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8131{
8132 if (iSegReg == UINT8_MAX)
8133 return VINF_SUCCESS;
8134
8135 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8136 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8137 switch (pVCpu->iem.s.enmCpuMode)
8138 {
8139 case IEMMODE_16BIT:
8140 case IEMMODE_32BIT:
8141 {
8142 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8143 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8144
8145 if ( pSel->Attr.n.u1Present
8146 && !pSel->Attr.n.u1Unusable)
8147 {
8148 Assert(pSel->Attr.n.u1DescType);
8149 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8150 {
8151 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8152 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8153 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8154
8155 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8156 {
8157 /** @todo CPL check. */
8158 }
8159
8160 /*
8161 * There are two kinds of data selectors, normal and expand down.
8162 */
8163 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8164 {
8165 if ( GCPtrFirst32 > pSel->u32Limit
8166 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8167 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8168 }
8169 else
8170 {
8171 /*
8172 * The upper boundary is defined by the B bit, not the G bit!
8173 */
8174 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8175 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8176 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8177 }
8178 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8179 }
8180 else
8181 {
8182
8183 /*
8184 * A code selector can usually be used to read through; writing is only
8185 * permitted in real and V8086 mode.
8186 */
8187 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8188 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8189 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8190 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8191 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8192
8193 if ( GCPtrFirst32 > pSel->u32Limit
8194 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8195 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8196
8197 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8198 {
8199 /** @todo CPL check. */
8200 }
8201
8202 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8203 }
8204 }
8205 else
8206 return iemRaiseGeneralProtectionFault0(pVCpu);
8207 return VINF_SUCCESS;
8208 }
8209
8210 case IEMMODE_64BIT:
8211 {
8212 RTGCPTR GCPtrMem = *pGCPtrMem;
8213 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8214 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8215
8216 Assert(cbMem >= 1);
8217 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8218 return VINF_SUCCESS;
8219 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8220 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8221 return iemRaiseGeneralProtectionFault0(pVCpu);
8222 }
8223
8224 default:
8225 AssertFailedReturn(VERR_IEM_IPE_7);
8226 }
8227}
8228
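/*
 * A standalone sketch of the expand-down data segment check performed above:
 * for such segments the limit is a *lower* bound, so valid offsets lie in
 * (limit, 0xffff] or (limit, 0xffffffff] depending on the B/D bit.  The
 * parameter names are illustrative.
 */
DECLINLINE(bool) iemExampleExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fDefBig)
{
    uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= offMax; /* e.g. limit=0x0fff, B=0: 0x1000..0xffff is valid */
}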
8229
8230/**
8231 * Translates a virtual address to a physical address and checks if we
8232 * can access the page as specified.
8233 *
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 * @param GCPtrMem The virtual address.
8236 * @param fAccess The intended access.
8237 * @param pGCPhysMem Where to return the physical address.
8238 */
8239IEM_STATIC VBOXSTRICTRC
8240iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8241{
8242 /** @todo Need a different PGM interface here. We're currently using
8243 * generic / REM interfaces. this won't cut it for R0 & RC. */
8244 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8245 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8246 RTGCPHYS GCPhys;
8247 uint64_t fFlags;
8248 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8249 if (RT_FAILURE(rc))
8250 {
8251 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8252 /** @todo Check unassigned memory in unpaged mode. */
8253 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8254 *pGCPhysMem = NIL_RTGCPHYS;
8255 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8256 }
8257
8258 /* If the page is writable and does not have the no-exec bit set, all
8259 access is allowed. Otherwise we'll have to check more carefully... */
8260 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8261 {
8262 /* Write to read only memory? */
8263 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8264 && !(fFlags & X86_PTE_RW)
8265 && ( (pVCpu->iem.s.uCpl == 3
8266 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8267 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8268 {
8269 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8270 *pGCPhysMem = NIL_RTGCPHYS;
8271 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8272 }
8273
8274 /* Kernel memory accessed by userland? */
8275 if ( !(fFlags & X86_PTE_US)
8276 && pVCpu->iem.s.uCpl == 3
8277 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8278 {
8279 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8280 *pGCPhysMem = NIL_RTGCPHYS;
8281 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8282 }
8283
8284 /* Executing non-executable memory? */
8285 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8286 && (fFlags & X86_PTE_PAE_NX)
8287 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8288 {
8289 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8290 *pGCPhysMem = NIL_RTGCPHYS;
8291 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8292 VERR_ACCESS_DENIED);
8293 }
8294 }
8295
8296 /*
8297 * Set the dirty / access flags.
8298 * ASSUMES this is set when the address is translated rather than on commit...
8299 */
8300 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8301 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8302 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8303 {
8304 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8305 AssertRC(rc2);
8306 }
8307
8308 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8309 *pGCPhysMem = GCPhys;
8310 return VINF_SUCCESS;
8311}
8312
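/*
 * A small sketch of the write-protect decision taken above: a write to a
 * page without the R/W bit faults when the access is an ordinary (non-system)
 * one at CPL 3, or when CR0.WP is set.  The parameters are illustrative
 * stand-ins for the PTE flag, the CPL, the access kind and the CR0 bit.
 */
DECLINLINE(bool) iemExampleWriteToRoFaults(bool fPteRw, unsigned uCpl, bool fSysAccess, bool fCr0Wp)
{
    return !fPteRw && ((uCpl == 3 && !fSysAccess) || fCr0Wp);
}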
8313
8314
8315/**
8316 * Maps a physical page.
8317 *
8318 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8320 * @param GCPhysMem The physical address.
8321 * @param fAccess The intended access.
8322 * @param ppvMem Where to return the mapping address.
8323 * @param pLock The PGM lock.
8324 */
8325IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8326{
8327#ifdef IEM_LOG_MEMORY_WRITES
8328 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8329 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8330#endif
8331
8332 /** @todo This API may require some improving later. A private deal with PGM
8333 * regarding locking and unlocking needs to be struck. A couple of TLBs
8334 * living in PGM, but with publicly accessible inlined access methods
8335 * could perhaps be an even better solution. */
8336 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8337 GCPhysMem,
8338 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8339 pVCpu->iem.s.fBypassHandlers,
8340 ppvMem,
8341 pLock);
8342 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8343 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8344
8345 return rc;
8346}
8347
8348
8349/**
8350 * Unmap a page previously mapped by iemMemPageMap.
8351 *
8352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8353 * @param GCPhysMem The physical address.
8354 * @param fAccess The intended access.
8355 * @param pvMem What iemMemPageMap returned.
8356 * @param pLock The PGM lock.
8357 */
8358DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8359{
8360 NOREF(pVCpu);
8361 NOREF(GCPhysMem);
8362 NOREF(fAccess);
8363 NOREF(pvMem);
8364 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8365}
8366
8367
8368/**
8369 * Looks up a memory mapping entry.
8370 *
8371 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8373 * @param pvMem The memory address.
8374 * @param fAccess The kind of access to match.
8375 */
8376DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8377{
8378 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8379 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8380 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8381 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8382 return 0;
8383 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8384 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8385 return 1;
8386 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8387 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8388 return 2;
8389 return VERR_NOT_FOUND;
8390}
8391
8392
8393/**
8394 * Finds a free memmap entry when using iNextMapping doesn't work.
8395 *
8396 * @returns Memory mapping index, 1024 on failure.
8397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8398 */
8399IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8400{
8401 /*
8402 * The easy case.
8403 */
8404 if (pVCpu->iem.s.cActiveMappings == 0)
8405 {
8406 pVCpu->iem.s.iNextMapping = 1;
8407 return 0;
8408 }
8409
8410 /* There should be enough mappings for all instructions. */
8411 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8412
8413 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8414 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8415 return i;
8416
8417 AssertFailedReturn(1024);
8418}
8419
8420
8421/**
8422 * Commits a bounce buffer that needs writing back and unmaps it.
8423 *
8424 * @returns Strict VBox status code.
8425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8426 * @param iMemMap The index of the buffer to commit.
8427 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8428 * Always false in ring-3, obviously.
8429 */
8430IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8431{
8432 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8433 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8434#ifdef IN_RING3
8435 Assert(!fPostponeFail);
8436 RT_NOREF_PV(fPostponeFail);
8437#endif
8438
8439 /*
8440 * Do the writing.
8441 */
8442 PVM pVM = pVCpu->CTX_SUFF(pVM);
8443 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8444 {
8445 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8446 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8447 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8448 if (!pVCpu->iem.s.fBypassHandlers)
8449 {
8450 /*
8451 * Carefully and efficiently dealing with access handler return
8452 * codes makes this a little bloated.
8453 */
8454 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8456 pbBuf,
8457 cbFirst,
8458 PGMACCESSORIGIN_IEM);
8459 if (rcStrict == VINF_SUCCESS)
8460 {
8461 if (cbSecond)
8462 {
8463 rcStrict = PGMPhysWrite(pVM,
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8465 pbBuf + cbFirst,
8466 cbSecond,
8467 PGMACCESSORIGIN_IEM);
8468 if (rcStrict == VINF_SUCCESS)
8469 { /* nothing */ }
8470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8471 {
8472 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8476 }
8477#ifndef IN_RING3
8478 else if (fPostponeFail)
8479 {
8480 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8483 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8484 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8485 return iemSetPassUpStatus(pVCpu, rcStrict);
8486 }
8487#endif
8488 else
8489 {
8490 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8493 return rcStrict;
8494 }
8495 }
8496 }
8497 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8498 {
8499 if (!cbSecond)
8500 {
8501 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8503 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8504 }
8505 else
8506 {
8507 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8508 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8509 pbBuf + cbFirst,
8510 cbSecond,
8511 PGMACCESSORIGIN_IEM);
8512 if (rcStrict2 == VINF_SUCCESS)
8513 {
8514 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8517 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8518 }
8519 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8520 {
8521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8524 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8525 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8526 }
8527#ifndef IN_RING3
8528 else if (fPostponeFail)
8529 {
8530 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8532 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8533 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8534 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8535 return iemSetPassUpStatus(pVCpu, rcStrict);
8536 }
8537#endif
8538 else
8539 {
8540 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8542 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8543 return rcStrict2;
8544 }
8545 }
8546 }
8547#ifndef IN_RING3
8548 else if (fPostponeFail)
8549 {
8550 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8552 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8553 if (!cbSecond)
8554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8555 else
8556 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8557 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8558 return iemSetPassUpStatus(pVCpu, rcStrict);
8559 }
8560#endif
8561 else
8562 {
8563 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8564 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8566 return rcStrict;
8567 }
8568 }
8569 else
8570 {
8571 /*
8572 * No access handlers, much simpler.
8573 */
8574 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8575 if (RT_SUCCESS(rc))
8576 {
8577 if (cbSecond)
8578 {
8579 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8580 if (RT_SUCCESS(rc))
8581 { /* likely */ }
8582 else
8583 {
8584 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8585 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8586 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8587 return rc;
8588 }
8589 }
8590 }
8591 else
8592 {
8593 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8594 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8595 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8596 return rc;
8597 }
8598 }
8599 }
8600
8601#if defined(IEM_LOG_MEMORY_WRITES)
8602 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8603 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8604 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8605 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8606 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8607 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8608
8609 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8610 g_cbIemWrote = cbWrote;
8611 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8612#endif
8613
8614 /*
8615 * Free the mapping entry.
8616 */
8617 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8618 Assert(pVCpu->iem.s.cActiveMappings != 0);
8619 pVCpu->iem.s.cActiveMappings--;
8620 return VINF_SUCCESS;
8621}
8622
8623
8624/**
8625 * iemMemMap worker that deals with a request crossing pages.
8626 */
8627IEM_STATIC VBOXSTRICTRC
8628iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8629{
8630 /*
8631 * Do the address translations.
8632 */
8633 RTGCPHYS GCPhysFirst;
8634 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8635 if (rcStrict != VINF_SUCCESS)
8636 return rcStrict;
8637
8638 RTGCPHYS GCPhysSecond;
8639 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8640 fAccess, &GCPhysSecond);
8641 if (rcStrict != VINF_SUCCESS)
8642 return rcStrict;
8643 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8644
8645 PVM pVM = pVCpu->CTX_SUFF(pVM);
8646
8647 /*
8648 * Read in the current memory content if it's a read, execute or partial
8649 * write access.
8650 */
8651 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8652 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8653 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8654
8655 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8656 {
8657 if (!pVCpu->iem.s.fBypassHandlers)
8658 {
8659 /*
8660 * Must carefully deal with access handler status codes here; it
8661 * makes the code a bit bloated.
8662 */
8663 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8664 if (rcStrict == VINF_SUCCESS)
8665 {
8666 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8667 if (rcStrict == VINF_SUCCESS)
8668 { /*likely */ }
8669 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8670 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8671 else
8672 {
8673 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8674 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8675 return rcStrict;
8676 }
8677 }
8678 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8679 {
8680 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8681 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8682 {
8683 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8684 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8685 }
8686 else
8687 {
8688 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8689 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8690 return rcStrict2;
8691 }
8692 }
8693 else
8694 {
8695 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8696 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8697 return rcStrict;
8698 }
8699 }
8700 else
8701 {
8702 /*
8703 * No informational status codes here, much more straightforward.
8704 */
8705 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8706 if (RT_SUCCESS(rc))
8707 {
8708 Assert(rc == VINF_SUCCESS);
8709 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8710 if (RT_SUCCESS(rc))
8711 Assert(rc == VINF_SUCCESS);
8712 else
8713 {
8714 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8715 return rc;
8716 }
8717 }
8718 else
8719 {
8720 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8721 return rc;
8722 }
8723 }
8724 }
8725#ifdef VBOX_STRICT
8726 else
8727 memset(pbBuf, 0xcc, cbMem);
8728 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8729 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8730#endif
8731
8732 /*
8733 * Commit the bounce buffer entry.
8734 */
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8737 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8738 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8739 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8740 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8741 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8742 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8743 pVCpu->iem.s.cActiveMappings++;
8744
8745 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8746 *ppvMem = pbBuf;
8747 return VINF_SUCCESS;
8748}
8749
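/*
 * A tiny sketch of the split arithmetic used above when an access crosses a
 * page boundary: the first chunk runs to the end of the first page and the
 * remainder starts at offset 0 of the second page.  Names are illustrative.
 */
DECLINLINE(void) iemExampleSplitAcrossPages(RTGCPTR GCPtrFirst, size_t cbMem, size_t *pcbFirst, size_t *pcbSecond)
{
    *pcbFirst  = (size_t)(PAGE_SIZE - (GCPtrFirst & PAGE_OFFSET_MASK)); /* e.g. offset 0xffe with cbMem=4 -> 2 bytes */
    *pcbSecond = cbMem - *pcbFirst;                                     /* -> the remaining 2 bytes on the next page */
}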
8750
8751/**
8752 * iemMemMap worker that deals with iemMemPageMap failures.
8753 */
8754IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8755 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8756{
8757 /*
8758 * Filter out conditions we can handle and the ones which shouldn't happen.
8759 */
8760 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8761 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8762 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8763 {
8764 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8765 return rcMap;
8766 }
8767 pVCpu->iem.s.cPotentialExits++;
8768
8769 /*
8770 * Read in the current memory content if it's a read, execute or partial
8771 * write access.
8772 */
8773 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8774 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8775 {
8776 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8777 memset(pbBuf, 0xff, cbMem);
8778 else
8779 {
8780 int rc;
8781 if (!pVCpu->iem.s.fBypassHandlers)
8782 {
8783 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8784 if (rcStrict == VINF_SUCCESS)
8785 { /* nothing */ }
8786 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8787 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8788 else
8789 {
8790 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8791 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8792 return rcStrict;
8793 }
8794 }
8795 else
8796 {
8797 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8798 if (RT_SUCCESS(rc))
8799 { /* likely */ }
8800 else
8801 {
8802 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8803 GCPhysFirst, rc));
8804 return rc;
8805 }
8806 }
8807 }
8808 }
8809#ifdef VBOX_STRICT
8810 else
8811 memset(pbBuf, 0xcc, cbMem);
8812#endif
8813#ifdef VBOX_STRICT
8814 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8815 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8816#endif
8817
8818 /*
8819 * Commit the bounce buffer entry.
8820 */
8821 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8822 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8823 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8824 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8825 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8826 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8827 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8828 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8829 pVCpu->iem.s.cActiveMappings++;
8830
8831 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8832 *ppvMem = pbBuf;
8833 return VINF_SUCCESS;
8834}
8835
8836
8837
8838/**
8839 * Maps the specified guest memory for the given kind of access.
8840 *
8841 * This may be using bounce buffering of the memory if it's crossing a page
8842 * boundary or if there is an access handler installed for any of it. Because
8843 * of lock prefix guarantees, we're in for some extra clutter when this
8844 * happens.
8845 *
8846 * This may raise a \#GP, \#SS, \#PF or \#AC.
8847 *
8848 * @returns VBox strict status code.
8849 *
8850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8851 * @param ppvMem Where to return the pointer to the mapped
8852 * memory.
8853 * @param cbMem The number of bytes to map. This is usually 1,
8854 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8855 * string operations it can be up to a page.
8856 * @param iSegReg The index of the segment register to use for
8857 * this access. The base and limits are checked.
8858 * Use UINT8_MAX to indicate that no segmentation
8859 * is required (for IDT, GDT and LDT accesses).
8860 * @param GCPtrMem The address of the guest memory.
8861 * @param fAccess How the memory is being accessed. The
8862 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8863 * how to map the memory, while the
8864 * IEM_ACCESS_WHAT_XXX bit is used when raising
8865 * exceptions.
8866 */
8867IEM_STATIC VBOXSTRICTRC
8868iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8869{
8870 /*
8871 * Check the input and figure out which mapping entry to use.
8872 */
8873 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8874 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8875 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8876
8877 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8878 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8879 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8880 {
8881 iMemMap = iemMemMapFindFree(pVCpu);
8882 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8883 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8884 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8885 pVCpu->iem.s.aMemMappings[2].fAccess),
8886 VERR_IEM_IPE_9);
8887 }
8888
8889 /*
8890 * Map the memory, checking that we can actually access it. If something
8891 * slightly complicated happens, fall back on bounce buffering.
8892 */
8893 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8894 if (rcStrict != VINF_SUCCESS)
8895 return rcStrict;
8896
8897 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8898 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8899
8900 RTGCPHYS GCPhysFirst;
8901 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8902 if (rcStrict != VINF_SUCCESS)
8903 return rcStrict;
8904
8905 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8906 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8907 if (fAccess & IEM_ACCESS_TYPE_READ)
8908 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8909
8910 void *pvMem;
8911 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8912 if (rcStrict != VINF_SUCCESS)
8913 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8914
8915 /*
8916 * Fill in the mapping table entry.
8917 */
8918 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8920 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8921 pVCpu->iem.s.cActiveMappings++;
8922
8923 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8924 *ppvMem = pvMem;
8925
8926 return VINF_SUCCESS;
8927}
8928
8929
8930/**
8931 * Commits the guest memory if bounce buffered and unmaps it.
8932 *
8933 * @returns Strict VBox status code.
8934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8935 * @param pvMem The mapping.
8936 * @param fAccess The kind of access.
8937 */
8938IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8939{
8940 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8941 AssertReturn(iMemMap >= 0, iMemMap);
8942
8943 /* If it's bounce buffered, we may need to write back the buffer. */
8944 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8945 {
8946 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8947 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8948 }
8949 /* Otherwise unlock it. */
8950 else
8951 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8952
8953 /* Free the entry. */
8954 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8955 Assert(pVCpu->iem.s.cActiveMappings != 0);
8956 pVCpu->iem.s.cActiveMappings--;
8957 return VINF_SUCCESS;
8958}
8959
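/*
 * A minimal usage sketch of the mapping API above for a dword-sized data
 * write; it mirrors the read helpers further down.  The helper name is
 * illustrative and error handling is reduced to passing the strict status
 * code up.
 */
DECLINLINE(VBOXSTRICTRC) iemExampleStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
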
8960#ifdef IEM_WITH_SETJMP
8961
8962/**
8963 * Maps the specified guest memory for the given kind of access, longjmp on
8964 * error.
8965 *
8966 * This may be using bounce buffering of the memory if it's crossing a page
8967 * boundary or if there is an access handler installed for any of it. Because
8968 * of lock prefix guarantees, we're in for some extra clutter when this
8969 * happens.
8970 *
8971 * This may raise a \#GP, \#SS, \#PF or \#AC.
8972 *
8973 * @returns Pointer to the mapped memory.
8974 *
8975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8976 * @param cbMem The number of bytes to map. This is usually 1,
8977 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8978 * string operations it can be up to a page.
8979 * @param iSegReg The index of the segment register to use for
8980 * this access. The base and limits are checked.
8981 * Use UINT8_MAX to indicate that no segmentation
8982 * is required (for IDT, GDT and LDT accesses).
8983 * @param GCPtrMem The address of the guest memory.
8984 * @param fAccess How the memory is being accessed. The
8985 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8986 * how to map the memory, while the
8987 * IEM_ACCESS_WHAT_XXX bit is used when raising
8988 * exceptions.
8989 */
8990IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8991{
8992 /*
8993 * Check the input and figure out which mapping entry to use.
8994 */
8995 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8996 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8997 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8998
8999 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9000 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9001 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9002 {
9003 iMemMap = iemMemMapFindFree(pVCpu);
9004 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9005 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9006 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9007 pVCpu->iem.s.aMemMappings[2].fAccess),
9008 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9009 }
9010
9011 /*
9012 * Map the memory, checking that we can actually access it. If something
9013 * slightly complicated happens, fall back on bounce buffering.
9014 */
9015 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9016 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9017 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9018
9019 /* Crossing a page boundary? */
9020 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9021 { /* No (likely). */ }
9022 else
9023 {
9024 void *pvMem;
9025 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9026 if (rcStrict == VINF_SUCCESS)
9027 return pvMem;
9028 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9029 }
9030
9031 RTGCPHYS GCPhysFirst;
9032 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9033 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9034 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9035
9036 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9037 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9038 if (fAccess & IEM_ACCESS_TYPE_READ)
9039 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9040
9041 void *pvMem;
9042 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9043 if (rcStrict == VINF_SUCCESS)
9044 { /* likely */ }
9045 else
9046 {
9047 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9048 if (rcStrict == VINF_SUCCESS)
9049 return pvMem;
9050 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9051 }
9052
9053 /*
9054 * Fill in the mapping table entry.
9055 */
9056 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9057 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9058 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9059 pVCpu->iem.s.cActiveMappings++;
9060
9061 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9062 return pvMem;
9063}
9064
9065
9066/**
9067 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9068 *
9069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9070 * @param pvMem The mapping.
9071 * @param fAccess The kind of access.
9072 */
9073IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9074{
9075 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9076 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9077
9078 /* If it's bounce buffered, we may need to write back the buffer. */
9079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9080 {
9081 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9082 {
9083 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9084 if (rcStrict == VINF_SUCCESS)
9085 return;
9086 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9087 }
9088 }
9089 /* Otherwise unlock it. */
9090 else
9091 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9092
9093 /* Free the entry. */
9094 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9095 Assert(pVCpu->iem.s.cActiveMappings != 0);
9096 pVCpu->iem.s.cActiveMappings--;
9097}
9098
9099#endif /* IEM_WITH_SETJMP */
9100
9101#ifndef IN_RING3
9102/**
9103 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9104 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9105 *
9106 * Allows the instruction to be completed and retired, while the IEM user will
9107 * return to ring-3 immediately afterwards and do the postponed writes there.
9108 *
9109 * @returns VBox status code (no strict statuses). Caller must check
9110 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9112 * @param pvMem The mapping.
9113 * @param fAccess The kind of access.
9114 */
9115IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9116{
9117 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9118 AssertReturn(iMemMap >= 0, iMemMap);
9119
9120 /* If it's bounce buffered, we may need to write back the buffer. */
9121 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9122 {
9123 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9124 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9125 }
9126 /* Otherwise unlock it. */
9127 else
9128 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9129
9130 /* Free the entry. */
9131 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9132 Assert(pVCpu->iem.s.cActiveMappings != 0);
9133 pVCpu->iem.s.cActiveMappings--;
9134 return VINF_SUCCESS;
9135}
9136#endif
9137
9138
9139/**
9140 * Rolls back mappings, releasing page locks and such.
9141 *
9142 * The caller shall only call this after checking cActiveMappings.
9143 *
9145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9146 */
9147IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9148{
9149 Assert(pVCpu->iem.s.cActiveMappings > 0);
9150
9151 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9152 while (iMemMap-- > 0)
9153 {
9154 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9155 if (fAccess != IEM_ACCESS_INVALID)
9156 {
9157 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9158 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9159 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9160 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9161 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9162 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9163 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9164 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9165 pVCpu->iem.s.cActiveMappings--;
9166 }
9167 }
9168}
9169
9170
9171/**
9172 * Fetches a data byte.
9173 *
9174 * @returns Strict VBox status code.
9175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9176 * @param pu8Dst Where to return the byte.
9177 * @param iSegReg The index of the segment register to use for
9178 * this access. The base and limits are checked.
9179 * @param GCPtrMem The address of the guest memory.
9180 */
9181IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9182{
9183 /* The lazy approach for now... */
9184 uint8_t const *pu8Src;
9185 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 if (rc == VINF_SUCCESS)
9187 {
9188 *pu8Dst = *pu8Src;
9189 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9190 }
9191 return rc;
9192}
9193
9194
9195#ifdef IEM_WITH_SETJMP
9196/**
9197 * Fetches a data byte, longjmp on error.
9198 *
9199 * @returns The byte.
9200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9201 * @param iSegReg The index of the segment register to use for
9202 * this access. The base and limits are checked.
9203 * @param GCPtrMem The address of the guest memory.
9204 */
9205DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9206{
9207 /* The lazy approach for now... */
9208 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9209 uint8_t const bRet = *pu8Src;
9210 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9211 return bRet;
9212}
9213#endif /* IEM_WITH_SETJMP */
9214
9215
9216/**
9217 * Fetches a data word.
9218 *
9219 * @returns Strict VBox status code.
9220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9221 * @param pu16Dst Where to return the word.
9222 * @param iSegReg The index of the segment register to use for
9223 * this access. The base and limits are checked.
9224 * @param GCPtrMem The address of the guest memory.
9225 */
9226IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9227{
9228 /* The lazy approach for now... */
9229 uint16_t const *pu16Src;
9230 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9231 if (rc == VINF_SUCCESS)
9232 {
9233 *pu16Dst = *pu16Src;
9234 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9235 }
9236 return rc;
9237}
9238
9239
9240#ifdef IEM_WITH_SETJMP
9241/**
9242 * Fetches a data word, longjmp on error.
9243 *
9244 * @returns The word
9245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9246 * @param iSegReg The index of the segment register to use for
9247 * this access. The base and limits are checked.
9248 * @param GCPtrMem The address of the guest memory.
9249 */
9250DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9251{
9252 /* The lazy approach for now... */
9253 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9254 uint16_t const u16Ret = *pu16Src;
9255 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9256 return u16Ret;
9257}
9258#endif
9259
9260
9261/**
9262 * Fetches a data dword.
9263 *
9264 * @returns Strict VBox status code.
9265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9266 * @param pu32Dst Where to return the dword.
9267 * @param iSegReg The index of the segment register to use for
9268 * this access. The base and limits are checked.
9269 * @param GCPtrMem The address of the guest memory.
9270 */
9271IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9272{
9273 /* The lazy approach for now... */
9274 uint32_t const *pu32Src;
9275 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9276 if (rc == VINF_SUCCESS)
9277 {
9278 *pu32Dst = *pu32Src;
9279 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9280 }
9281 return rc;
9282}
9283
9284
9285#ifdef IEM_WITH_SETJMP
9286
9287IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9288{
9289 Assert(cbMem >= 1);
9290 Assert(iSegReg < X86_SREG_COUNT);
9291
9292 /*
9293 * 64-bit mode is simpler.
9294 */
9295 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9296 {
9297 if (iSegReg >= X86_SREG_FS)
9298 {
9299 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9300 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9301 GCPtrMem += pSel->u64Base;
9302 }
9303
9304 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9305 return GCPtrMem;
9306 }
9307 /*
9308 * 16-bit and 32-bit segmentation.
9309 */
9310 else
9311 {
9312 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9313 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9314 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9315 == X86DESCATTR_P /* data, expand up */
9316 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9317 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9318 {
9319 /* expand up */
9320 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
9321 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9322 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9323 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9324 }
9325 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9326 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9327 {
9328 /* expand down */
9329 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9330 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9331 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9332 && GCPtrLast32 > (uint32_t)GCPtrMem))
9333 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9334 }
9335 else
9336 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9337 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9338 }
9339 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9340}
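/*
 * Editor's note: a worked example of the expand-down check above, assuming a hypothetical
 * segment with u32Limit = 0x0fff and D/B = 1 (so valid offsets are 0x1000..0xffffffff).
 */
#if 0 /* illustrative sketch, not built */
    /* A 4 byte read at offset 0x2000 passes all three tests: */
    Assert(   UINT32_C(0x2000) > UINT32_C(0x0fff)        /* first byte above the limit */
           && UINT32_C(0x2000) + 4 <= UINT32_MAX         /* last byte inside the 4 GiB space (D/B=1) */
           && UINT32_C(0x2000) + 4 > UINT32_C(0x2000));  /* no 32-bit wrap-around */
    /* A 4 byte read at offset 0x0800 fails the first test and takes the
       iemRaiseSelectorBoundsJmp() path above instead. */
#endif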
9341
9342
9343IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9344{
9345 Assert(cbMem >= 1);
9346 Assert(iSegReg < X86_SREG_COUNT);
9347
9348 /*
9349 * 64-bit mode is simpler.
9350 */
9351 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9352 {
9353 if (iSegReg >= X86_SREG_FS)
9354 {
9355 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9356 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9357 GCPtrMem += pSel->u64Base;
9358 }
9359
9360 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9361 return GCPtrMem;
9362 }
9363 /*
9364 * 16-bit and 32-bit segmentation.
9365 */
9366 else
9367 {
9368 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9369 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9370 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9371 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9372 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9373 {
9374 /* expand up */
9375 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
9376 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9377 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9378 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9379 }
9380 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9381 {
9382 /* expand down */
9383 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9384 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9385 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9386 && GCPtrLast32 > (uint32_t)GCPtrMem))
9387 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9388 }
9389 else
9390 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9391 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9392 }
9393 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9394}
9395
9396
9397/**
9398 * Fetches a data dword, longjmp on error, fallback/safe version.
9399 *
9400 * @returns The dword
9401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9402 * @param iSegReg The index of the segment register to use for
9403 * this access. The base and limits are checked.
9404 * @param GCPtrMem The address of the guest memory.
9405 */
9406IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9407{
9408 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9409 uint32_t const u32Ret = *pu32Src;
9410 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9411 return u32Ret;
9412}
9413
9414
9415/**
9416 * Fetches a data dword, longjmp on error.
9417 *
9418 * @returns The dword
9419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9420 * @param iSegReg The index of the segment register to use for
9421 * this access. The base and limits are checked.
9422 * @param GCPtrMem The address of the guest memory.
9423 */
9424DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9425{
9426# ifdef IEM_WITH_DATA_TLB
9427 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9428 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9429 {
9430 /// @todo more later.
9431 }
9432
9433 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9434# else
9435 /* The lazy approach. */
9436 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9437 uint32_t const u32Ret = *pu32Src;
9438 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9439 return u32Ret;
9440# endif
9441}
9442#endif
9443
9444
9445#ifdef SOME_UNUSED_FUNCTION
9446/**
9447 * Fetches a data dword and sign extends it to a qword.
9448 *
9449 * @returns Strict VBox status code.
9450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9451 * @param pu64Dst Where to return the sign extended value.
9452 * @param iSegReg The index of the segment register to use for
9453 * this access. The base and limits are checked.
9454 * @param GCPtrMem The address of the guest memory.
9455 */
9456IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9457{
9458 /* The lazy approach for now... */
9459 int32_t const *pi32Src;
9460 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9461 if (rc == VINF_SUCCESS)
9462 {
9463 *pu64Dst = *pi32Src;
9464 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9465 }
9466#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9467 else
9468 *pu64Dst = 0;
9469#endif
9470 return rc;
9471}
9472#endif
9473
9474
9475/**
9476 * Fetches a data qword.
9477 *
9478 * @returns Strict VBox status code.
9479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9480 * @param pu64Dst Where to return the qword.
9481 * @param iSegReg The index of the segment register to use for
9482 * this access. The base and limits are checked.
9483 * @param GCPtrMem The address of the guest memory.
9484 */
9485IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9486{
9487 /* The lazy approach for now... */
9488 uint64_t const *pu64Src;
9489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9490 if (rc == VINF_SUCCESS)
9491 {
9492 *pu64Dst = *pu64Src;
9493 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9494 }
9495 return rc;
9496}
9497
9498
9499#ifdef IEM_WITH_SETJMP
9500/**
9501 * Fetches a data qword, longjmp on error.
9502 *
9503 * @returns The qword.
9504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9505 * @param iSegReg The index of the segment register to use for
9506 * this access. The base and limits are checked.
9507 * @param GCPtrMem The address of the guest memory.
9508 */
9509DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9510{
9511 /* The lazy approach for now... */
9512 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9513 uint64_t const u64Ret = *pu64Src;
9514 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9515 return u64Ret;
9516}
9517#endif
9518
9519
9520/**
9521 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9522 *
9523 * @returns Strict VBox status code.
9524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9525 * @param pu64Dst Where to return the qword.
9526 * @param iSegReg The index of the segment register to use for
9527 * this access. The base and limits are checked.
9528 * @param GCPtrMem The address of the guest memory.
9529 */
9530IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9531{
9532 /* The lazy approach for now... */
9533 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9534 if (RT_UNLIKELY(GCPtrMem & 15))
9535 return iemRaiseGeneralProtectionFault0(pVCpu);
9536
9537 uint64_t const *pu64Src;
9538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9539 if (rc == VINF_SUCCESS)
9540 {
9541 *pu64Dst = *pu64Src;
9542 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9543 }
9544 return rc;
9545}
9546
9547
9548#ifdef IEM_WITH_SETJMP
9549/**
9550 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9551 *
9552 * @returns The qword.
9553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9554 * @param iSegReg The index of the segment register to use for
9555 * this access. The base and limits are checked.
9556 * @param GCPtrMem The address of the guest memory.
9557 */
9558DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9559{
9560 /* The lazy approach for now... */
9561 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9562 if (RT_LIKELY(!(GCPtrMem & 15)))
9563 {
9564 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9565 uint64_t const u64Ret = *pu64Src;
9566 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9567 return u64Ret;
9568 }
9569
9570 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9571 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9572}
9573#endif
9574
9575
9576/**
9577 * Fetches a data tword.
9578 *
9579 * @returns Strict VBox status code.
9580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9581 * @param pr80Dst Where to return the tword.
9582 * @param iSegReg The index of the segment register to use for
9583 * this access. The base and limits are checked.
9584 * @param GCPtrMem The address of the guest memory.
9585 */
9586IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9587{
9588 /* The lazy approach for now... */
9589 PCRTFLOAT80U pr80Src;
9590 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9591 if (rc == VINF_SUCCESS)
9592 {
9593 *pr80Dst = *pr80Src;
9594 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9595 }
9596 return rc;
9597}
9598
9599
9600#ifdef IEM_WITH_SETJMP
9601/**
9602 * Fetches a data tword, longjmp on error.
9603 *
9604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9605 * @param pr80Dst Where to return the tword.
9606 * @param iSegReg The index of the segment register to use for
9607 * this access. The base and limits are checked.
9608 * @param GCPtrMem The address of the guest memory.
9609 */
9610DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9611{
9612 /* The lazy approach for now... */
9613 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9614 *pr80Dst = *pr80Src;
9615 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9616}
9617#endif
9618
9619
9620/**
9621 * Fetches a data dqword (double qword), generally SSE related.
9622 *
9623 * @returns Strict VBox status code.
9624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9625 * @param pu128Dst Where to return the dqword.
9626 * @param iSegReg The index of the segment register to use for
9627 * this access. The base and limits are checked.
9628 * @param GCPtrMem The address of the guest memory.
9629 */
9630IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9631{
9632 /* The lazy approach for now... */
9633 PCRTUINT128U pu128Src;
9634 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9635 if (rc == VINF_SUCCESS)
9636 {
9637 pu128Dst->au64[0] = pu128Src->au64[0];
9638 pu128Dst->au64[1] = pu128Src->au64[1];
9639 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9640 }
9641 return rc;
9642}
9643
9644
9645#ifdef IEM_WITH_SETJMP
9646/**
9647 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9648 *
9649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9650 * @param pu128Dst Where to return the dqword.
9651 * @param iSegReg The index of the segment register to use for
9652 * this access. The base and limits are checked.
9653 * @param GCPtrMem The address of the guest memory.
9654 */
9655IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9656{
9657 /* The lazy approach for now... */
9658 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9659 pu128Dst->au64[0] = pu128Src->au64[0];
9660 pu128Dst->au64[1] = pu128Src->au64[1];
9661 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9662}
9663#endif
9664
9665
9666/**
9667 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9668 * related.
9669 *
9670 * Raises \#GP(0) if not aligned.
9671 *
9672 * @returns Strict VBox status code.
9673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9674 * @param pu128Dst Where to return the dqword.
9675 * @param iSegReg The index of the segment register to use for
9676 * this access. The base and limits are checked.
9677 * @param GCPtrMem The address of the guest memory.
9678 */
9679IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9680{
9681 /* The lazy approach for now... */
9682 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9683 if ( (GCPtrMem & 15)
9684 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9685 return iemRaiseGeneralProtectionFault0(pVCpu);
9686
9687 PCRTUINT128U pu128Src;
9688 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9689 if (rc == VINF_SUCCESS)
9690 {
9691 pu128Dst->au64[0] = pu128Src->au64[0];
9692 pu128Dst->au64[1] = pu128Src->au64[1];
9693 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9694 }
9695 return rc;
9696}
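/*
 * Editor's note: a minimal sketch of the alignment rule used above, factored into a
 * hypothetical helper that is not part of this file.  Misaligned 16 byte SSE accesses
 * only fault when MXCSR.MM (AMD misaligned SSE mode) is clear.
 */
#if 0 /* illustrative sketch, not built */
DECLINLINE(bool) iemSseMisalignFaults(PVMCPU pVCpu, RTGCPTR GCPtrMem) /* hypothetical name */
{
    return (GCPtrMem & 15) != 0
        && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif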
9697
9698
9699#ifdef IEM_WITH_SETJMP
9700/**
9701 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9702 * related, longjmp on error.
9703 *
9704 * Raises \#GP(0) if not aligned.
9705 *
9706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9707 * @param pu128Dst Where to return the dqword.
9708 * @param iSegReg The index of the segment register to use for
9709 * this access. The base and limits are checked.
9710 * @param GCPtrMem The address of the guest memory.
9711 */
9712DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9713{
9714 /* The lazy approach for now... */
9715 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9716 if ( (GCPtrMem & 15) == 0
9717 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9718 {
9719 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 pu128Dst->au64[0] = pu128Src->au64[0];
9721 pu128Dst->au64[1] = pu128Src->au64[1];
9722 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9723 return;
9724 }
9725
9726 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9727 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9728}
9729#endif
9730
9731
9732/**
9733 * Fetches a data oword (octo word), generally AVX related.
9734 *
9735 * @returns Strict VBox status code.
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param pu256Dst Where to return the oword.
9738 * @param iSegReg The index of the segment register to use for
9739 * this access. The base and limits are checked.
9740 * @param GCPtrMem The address of the guest memory.
9741 */
9742IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9743{
9744 /* The lazy approach for now... */
9745 PCRTUINT256U pu256Src;
9746 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9747 if (rc == VINF_SUCCESS)
9748 {
9749 pu256Dst->au64[0] = pu256Src->au64[0];
9750 pu256Dst->au64[1] = pu256Src->au64[1];
9751 pu256Dst->au64[2] = pu256Src->au64[2];
9752 pu256Dst->au64[3] = pu256Src->au64[3];
9753 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9754 }
9755 return rc;
9756}
9757
9758
9759#ifdef IEM_WITH_SETJMP
9760/**
9761 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9762 *
9763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9764 * @param pu256Dst Where to return the oword.
9765 * @param iSegReg The index of the segment register to use for
9766 * this access. The base and limits are checked.
9767 * @param GCPtrMem The address of the guest memory.
9768 */
9769IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9770{
9771 /* The lazy approach for now... */
9772 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9773 pu256Dst->au64[0] = pu256Src->au64[0];
9774 pu256Dst->au64[1] = pu256Src->au64[1];
9775 pu256Dst->au64[2] = pu256Src->au64[2];
9776 pu256Dst->au64[3] = pu256Src->au64[3];
9777 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9778}
9779#endif
9780
9781
9782/**
9783 * Fetches a data oword (octo word) at an aligned address, generally AVX
9784 * related.
9785 *
9786 * Raises \#GP(0) if not aligned.
9787 *
9788 * @returns Strict VBox status code.
9789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9790 * @param pu256Dst Where to return the oword.
9791 * @param iSegReg The index of the segment register to use for
9792 * this access. The base and limits are checked.
9793 * @param GCPtrMem The address of the guest memory.
9794 */
9795IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9796{
9797 /* The lazy approach for now... */
9798 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9799 if (GCPtrMem & 31)
9800 return iemRaiseGeneralProtectionFault0(pVCpu);
9801
9802 PCRTUINT256U pu256Src;
9803 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9804 if (rc == VINF_SUCCESS)
9805 {
9806 pu256Dst->au64[0] = pu256Src->au64[0];
9807 pu256Dst->au64[1] = pu256Src->au64[1];
9808 pu256Dst->au64[2] = pu256Src->au64[2];
9809 pu256Dst->au64[3] = pu256Src->au64[3];
9810 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9811 }
9812 return rc;
9813}
9814
9815
9816#ifdef IEM_WITH_SETJMP
9817/**
9818 * Fetches a data oword (octo word) at an aligned address, generally AVX
9819 * related, longjmp on error.
9820 *
9821 * Raises \#GP(0) if not aligned.
9822 *
9823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9824 * @param pu256Dst Where to return the oword.
9825 * @param iSegReg The index of the segment register to use for
9826 * this access. The base and limits are checked.
9827 * @param GCPtrMem The address of the guest memory.
9828 */
9829DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9830{
9831 /* The lazy approach for now... */
9832 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9833 if ((GCPtrMem & 31) == 0)
9834 {
9835 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9836 pu256Dst->au64[0] = pu256Src->au64[0];
9837 pu256Dst->au64[1] = pu256Src->au64[1];
9838 pu256Dst->au64[2] = pu256Src->au64[2];
9839 pu256Dst->au64[3] = pu256Src->au64[3];
9840 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9841 return;
9842 }
9843
9844 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9845 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9846}
9847#endif
9848
9849
9850
9851/**
9852 * Fetches a descriptor register (lgdt, lidt).
9853 *
9854 * @returns Strict VBox status code.
9855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9856 * @param pcbLimit Where to return the limit.
9857 * @param pGCPtrBase Where to return the base.
9858 * @param iSegReg The index of the segment register to use for
9859 * this access. The base and limits are checked.
9860 * @param GCPtrMem The address of the guest memory.
9861 * @param enmOpSize The effective operand size.
9862 */
9863IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9864 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9865{
9866 /*
9867 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9868 * little special:
9869 * - The two reads are done separately.
9870 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9871 * - We suspect the 386 to actually commit the limit before the base in
9872 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9873 * don't try to emulate this eccentric behavior, because it's not well
9874 * enough understood and rather hard to trigger.
9875 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9876 */
9877 VBOXSTRICTRC rcStrict;
9878 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9879 {
9880 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9881 if (rcStrict == VINF_SUCCESS)
9882 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9883 }
9884 else
9885 {
9886 uint32_t uTmp = 0; /* (zero initialized to quiet Visual C++'s 'maybe used uninitialized' warning) */
9887 if (enmOpSize == IEMMODE_32BIT)
9888 {
9889 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9890 {
9891 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9892 if (rcStrict == VINF_SUCCESS)
9893 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9894 }
9895 else
9896 {
9897 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9898 if (rcStrict == VINF_SUCCESS)
9899 {
9900 *pcbLimit = (uint16_t)uTmp;
9901 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9902 }
9903 }
9904 if (rcStrict == VINF_SUCCESS)
9905 *pGCPtrBase = uTmp;
9906 }
9907 else
9908 {
9909 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9910 if (rcStrict == VINF_SUCCESS)
9911 {
9912 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9913 if (rcStrict == VINF_SUCCESS)
9914 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9915 }
9916 }
9917 }
9918 return rcStrict;
9919}
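/*
 * Editor's note: a hypothetical usage sketch of the descriptor-table fetch above (not part
 * of the original file).  GCPtrEffSrc is an assumed local; with a 16-bit operand size only
 * the low 24 bits of the base survive, matching the masking in the function.
 */
#if 0 /* illustrative sketch, not built */
    uint16_t     cbLimit;
    RTGCPTR      GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, X86_SREG_DS,
                                                GCPtrEffSrc, IEMMODE_16BIT);
    if (rcStrict == VINF_SUCCESS)
        Assert(!(GCPtrBase & UINT64_C(0xffffffffff000000))); /* 24-bit base with a 16-bit operand size */
#endif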
9920
9921
9922
9923/**
9924 * Stores a data byte.
9925 *
9926 * @returns Strict VBox status code.
9927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9928 * @param iSegReg The index of the segment register to use for
9929 * this access. The base and limits are checked.
9930 * @param GCPtrMem The address of the guest memory.
9931 * @param u8Value The value to store.
9932 */
9933IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9934{
9935 /* The lazy approach for now... */
9936 uint8_t *pu8Dst;
9937 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9938 if (rc == VINF_SUCCESS)
9939 {
9940 *pu8Dst = u8Value;
9941 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9942 }
9943 return rc;
9944}
9945
9946
9947#ifdef IEM_WITH_SETJMP
9948/**
9949 * Stores a data byte, longjmp on error.
9950 *
9951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9952 * @param iSegReg The index of the segment register to use for
9953 * this access. The base and limits are checked.
9954 * @param GCPtrMem The address of the guest memory.
9955 * @param u8Value The value to store.
9956 */
9957IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9958{
9959 /* The lazy approach for now... */
9960 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9961 *pu8Dst = u8Value;
9962 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9963}
9964#endif
9965
9966
9967/**
9968 * Stores a data word.
9969 *
9970 * @returns Strict VBox status code.
9971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9972 * @param iSegReg The index of the segment register to use for
9973 * this access. The base and limits are checked.
9974 * @param GCPtrMem The address of the guest memory.
9975 * @param u16Value The value to store.
9976 */
9977IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9978{
9979 /* The lazy approach for now... */
9980 uint16_t *pu16Dst;
9981 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9982 if (rc == VINF_SUCCESS)
9983 {
9984 *pu16Dst = u16Value;
9985 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9986 }
9987 return rc;
9988}
9989
9990
9991#ifdef IEM_WITH_SETJMP
9992/**
9993 * Stores a data word, longjmp on error.
9994 *
9995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9996 * @param iSegReg The index of the segment register to use for
9997 * this access. The base and limits are checked.
9998 * @param GCPtrMem The address of the guest memory.
9999 * @param u16Value The value to store.
10000 */
10001IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10002{
10003 /* The lazy approach for now... */
10004 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10005 *pu16Dst = u16Value;
10006 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10007}
10008#endif
10009
10010
10011/**
10012 * Stores a data dword.
10013 *
10014 * @returns Strict VBox status code.
10015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10016 * @param iSegReg The index of the segment register to use for
10017 * this access. The base and limits are checked.
10018 * @param GCPtrMem The address of the guest memory.
10019 * @param u32Value The value to store.
10020 */
10021IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10022{
10023 /* The lazy approach for now... */
10024 uint32_t *pu32Dst;
10025 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10026 if (rc == VINF_SUCCESS)
10027 {
10028 *pu32Dst = u32Value;
10029 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10030 }
10031 return rc;
10032}
10033
10034
10035#ifdef IEM_WITH_SETJMP
10036/**
10037 * Stores a data dword, longjmp on error.
10038 *
10040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10041 * @param iSegReg The index of the segment register to use for
10042 * this access. The base and limits are checked.
10043 * @param GCPtrMem The address of the guest memory.
10044 * @param u32Value The value to store.
10045 */
10046IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10047{
10048 /* The lazy approach for now... */
10049 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10050 *pu32Dst = u32Value;
10051 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10052}
10053#endif
10054
10055
10056/**
10057 * Stores a data qword.
10058 *
10059 * @returns Strict VBox status code.
10060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10061 * @param iSegReg The index of the segment register to use for
10062 * this access. The base and limits are checked.
10063 * @param GCPtrMem The address of the guest memory.
10064 * @param u64Value The value to store.
10065 */
10066IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10067{
10068 /* The lazy approach for now... */
10069 uint64_t *pu64Dst;
10070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10071 if (rc == VINF_SUCCESS)
10072 {
10073 *pu64Dst = u64Value;
10074 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10075 }
10076 return rc;
10077}
10078
10079
10080#ifdef IEM_WITH_SETJMP
10081/**
10082 * Stores a data qword, longjmp on error.
10083 *
10084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10085 * @param iSegReg The index of the segment register to use for
10086 * this access. The base and limits are checked.
10087 * @param GCPtrMem The address of the guest memory.
10088 * @param u64Value The value to store.
10089 */
10090IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10091{
10092 /* The lazy approach for now... */
10093 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10094 *pu64Dst = u64Value;
10095 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10096}
10097#endif
10098
10099
10100/**
10101 * Stores a data dqword.
10102 *
10103 * @returns Strict VBox status code.
10104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10105 * @param iSegReg The index of the segment register to use for
10106 * this access. The base and limits are checked.
10107 * @param GCPtrMem The address of the guest memory.
10108 * @param u128Value The value to store.
10109 */
10110IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10111{
10112 /* The lazy approach for now... */
10113 PRTUINT128U pu128Dst;
10114 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10115 if (rc == VINF_SUCCESS)
10116 {
10117 pu128Dst->au64[0] = u128Value.au64[0];
10118 pu128Dst->au64[1] = u128Value.au64[1];
10119 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10120 }
10121 return rc;
10122}
10123
10124
10125#ifdef IEM_WITH_SETJMP
10126/**
10127 * Stores a data dqword, longjmp on error.
10128 *
10129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10130 * @param iSegReg The index of the segment register to use for
10131 * this access. The base and limits are checked.
10132 * @param GCPtrMem The address of the guest memory.
10133 * @param u128Value The value to store.
10134 */
10135IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10136{
10137 /* The lazy approach for now... */
10138 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10139 pu128Dst->au64[0] = u128Value.au64[0];
10140 pu128Dst->au64[1] = u128Value.au64[1];
10141 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10142}
10143#endif
10144
10145
10146/**
10147 * Stores a data dqword, SSE aligned.
10148 *
10149 * @returns Strict VBox status code.
10150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10151 * @param iSegReg The index of the segment register to use for
10152 * this access. The base and limits are checked.
10153 * @param GCPtrMem The address of the guest memory.
10154 * @param u128Value The value to store.
10155 */
10156IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10157{
10158 /* The lazy approach for now... */
10159 if ( (GCPtrMem & 15)
10160 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10161 return iemRaiseGeneralProtectionFault0(pVCpu);
10162
10163 PRTUINT128U pu128Dst;
10164 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10165 if (rc == VINF_SUCCESS)
10166 {
10167 pu128Dst->au64[0] = u128Value.au64[0];
10168 pu128Dst->au64[1] = u128Value.au64[1];
10169 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10170 }
10171 return rc;
10172}
10173
10174
10175#ifdef IEM_WITH_SETJMP
10176/**
10177 * Stores a data dqword, SSE aligned, longjmp on error.
10178 *
10180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10181 * @param iSegReg The index of the segment register to use for
10182 * this access. The base and limits are checked.
10183 * @param GCPtrMem The address of the guest memory.
10184 * @param u128Value The value to store.
10185 */
10186DECL_NO_INLINE(IEM_STATIC, void)
10187iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10188{
10189 /* The lazy approach for now... */
10190 if ( (GCPtrMem & 15) == 0
10191 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10192 {
10193 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10194 pu128Dst->au64[0] = u128Value.au64[0];
10195 pu128Dst->au64[1] = u128Value.au64[1];
10196 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10197 return;
10198 }
10199
10200 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10201 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10202}
10203#endif
10204
10205
10206/**
10207 * Stores a data oword (octo word).
10208 *
10209 * @returns Strict VBox status code.
10210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10211 * @param iSegReg The index of the segment register to use for
10212 * this access. The base and limits are checked.
10213 * @param GCPtrMem The address of the guest memory.
10214 * @param pu256Value Pointer to the value to store.
10215 */
10216IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10217{
10218 /* The lazy approach for now... */
10219 PRTUINT256U pu256Dst;
10220 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10221 if (rc == VINF_SUCCESS)
10222 {
10223 pu256Dst->au64[0] = pu256Value->au64[0];
10224 pu256Dst->au64[1] = pu256Value->au64[1];
10225 pu256Dst->au64[2] = pu256Value->au64[2];
10226 pu256Dst->au64[3] = pu256Value->au64[3];
10227 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10228 }
10229 return rc;
10230}
10231
10232
10233#ifdef IEM_WITH_SETJMP
10234/**
10235 * Stores a data oword (octo word), longjmp on error.
10236 *
10237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10238 * @param iSegReg The index of the segment register to use for
10239 * this access. The base and limits are checked.
10240 * @param GCPtrMem The address of the guest memory.
10241 * @param pu256Value Pointer to the value to store.
10242 */
10243IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10244{
10245 /* The lazy approach for now... */
10246 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10247 pu256Dst->au64[0] = pu256Value->au64[0];
10248 pu256Dst->au64[1] = pu256Value->au64[1];
10249 pu256Dst->au64[2] = pu256Value->au64[2];
10250 pu256Dst->au64[3] = pu256Value->au64[3];
10251 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10252}
10253#endif
10254
10255
10256/**
10257 * Stores a data oword (octo word), AVX aligned.
10258 *
10259 * @returns Strict VBox status code.
10260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10261 * @param iSegReg The index of the segment register to use for
10262 * this access. The base and limits are checked.
10263 * @param GCPtrMem The address of the guest memory.
10264 * @param pu256Value Pointer to the value to store.
10265 */
10266IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10267{
10268 /* The lazy approach for now... */
10269 if (GCPtrMem & 31)
10270 return iemRaiseGeneralProtectionFault0(pVCpu);
10271
10272 PRTUINT256U pu256Dst;
10273 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10274 if (rc == VINF_SUCCESS)
10275 {
10276 pu256Dst->au64[0] = pu256Value->au64[0];
10277 pu256Dst->au64[1] = pu256Value->au64[1];
10278 pu256Dst->au64[2] = pu256Value->au64[2];
10279 pu256Dst->au64[3] = pu256Value->au64[3];
10280 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10281 }
10282 return rc;
10283}
10284
10285
10286#ifdef IEM_WITH_SETJMP
10287/**
10288 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10289 *
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param iSegReg The index of the segment register to use for
10293 * this access. The base and limits are checked.
10294 * @param GCPtrMem The address of the guest memory.
10295 * @param pu256Value Pointer to the value to store.
10296 */
10297DECL_NO_INLINE(IEM_STATIC, void)
10298iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10299{
10300 /* The lazy approach for now... */
10301 if ((GCPtrMem & 31) == 0)
10302 {
10303 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10304 pu256Dst->au64[0] = pu256Value->au64[0];
10305 pu256Dst->au64[1] = pu256Value->au64[1];
10306 pu256Dst->au64[2] = pu256Value->au64[2];
10307 pu256Dst->au64[3] = pu256Value->au64[3];
10308 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10309 return;
10310 }
10311
10312 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10313 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10314}
10315#endif
10316
10317
10318/**
10319 * Stores a descriptor register (sgdt, sidt).
10320 *
10321 * @returns Strict VBox status code.
10322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10323 * @param cbLimit The limit.
10324 * @param GCPtrBase The base address.
10325 * @param iSegReg The index of the segment register to use for
10326 * this access. The base and limits are checked.
10327 * @param GCPtrMem The address of the guest memory.
10328 */
10329IEM_STATIC VBOXSTRICTRC
10330iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10331{
10332 /*
10333 * The SIDT and SGDT instructions actually store the data using two
10334 * independent writes. The instructions do not respond to operand size prefixes.
10335 */
10336 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10337 if (rcStrict == VINF_SUCCESS)
10338 {
10339 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10340 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10341 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10342 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10343 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10344 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10345 else
10346 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10347 }
10348 return rcStrict;
10349}
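/*
 * Editor's note: a worked example of the byte layout produced above, using hypothetical
 * values.  The limit word is written first, then the base; on a 286 target the unused
 * high byte of the 32-bit base field is stored as 0xff.
 *
 *   cbLimit = 0x03ff, GCPtrBase = 0x00123456, 16-bit mode, 286 target:
 *     GCPtrMem + 0:  ff 03            - the limit
 *     GCPtrMem + 2:  56 34 12 ff      - (uint32_t)GCPtrBase | 0xff000000
 */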
10350
10351
10352/**
10353 * Pushes a word onto the stack.
10354 *
10355 * @returns Strict VBox status code.
10356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10357 * @param u16Value The value to push.
10358 */
10359IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10360{
10361 /* Decrement the stack pointer. */
10362 uint64_t uNewRsp;
10363 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10364
10365 /* Write the word the lazy way. */
10366 uint16_t *pu16Dst;
10367 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10368 if (rc == VINF_SUCCESS)
10369 {
10370 *pu16Dst = u16Value;
10371 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10372 }
10373
10374 /* Commit the new RSP value unless an access handler made trouble. */
10375 if (rc == VINF_SUCCESS)
10376 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10377
10378 return rc;
10379}
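/*
 * Editor's note: a minimal, hypothetical usage sketch of the stack push helpers above
 * (not part of the original file).  RSP is only updated when the write succeeded, so
 * callers just forward the strict status code.
 */
#if 0 /* illustrative sketch, not built */
    uint16_t const u16Value = 0x1234;                     /* hypothetical value to push */
    VBOXSTRICTRC   rcStrict = iemMemStackPushU16(pVCpu, u16Value);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                  /* #SS/#PF etc.; RSP is unchanged. */
#endif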
10380
10381
10382/**
10383 * Pushes a dword onto the stack.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10387 * @param u32Value The value to push.
10388 */
10389IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10390{
10391 /* Decrement the stack pointer. */
10392 uint64_t uNewRsp;
10393 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10394
10395 /* Write the dword the lazy way. */
10396 uint32_t *pu32Dst;
10397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10398 if (rc == VINF_SUCCESS)
10399 {
10400 *pu32Dst = u32Value;
10401 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10402 }
10403
10404 /* Commit the new RSP value unless an access handler made trouble. */
10405 if (rc == VINF_SUCCESS)
10406 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10407
10408 return rc;
10409}
10410
10411
10412/**
10413 * Pushes a dword segment register value onto the stack.
10414 *
10415 * @returns Strict VBox status code.
10416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10417 * @param u32Value The value to push.
10418 */
10419IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10420{
10421 /* Decrement the stack pointer. */
10422 uint64_t uNewRsp;
10423 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10424
10425 /* The Intel docs talk about zero extending the selector register
10426 value. The actual Intel CPU at hand might be zero extending the value,
10427 but it still only writes the lower word... */
10428 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10429 * happens when crossing an electric page boundary: is the high word checked
10430 * for write accessibility or not? Probably it is. What about segment limits?
10431 * It appears this behavior is also shared with trap error codes.
10432 *
10433 * Docs indicate the behavior may have changed around the Pentium or Pentium Pro. Check
10434 * ancient hardware to find out when it actually changed. */
10435 uint16_t *pu16Dst;
10436 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10437 if (rc == VINF_SUCCESS)
10438 {
10439 *pu16Dst = (uint16_t)u32Value;
10440 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10441 }
10442
10443 /* Commit the new RSP value unless an access handler made trouble. */
10444 if (rc == VINF_SUCCESS)
10445 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10446
10447 return rc;
10448}
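/*
 * Editor's note: a worked example of the behaviour implemented above, with hypothetical
 * values.  Only the low word of the 32-bit stack slot is written, so the high word keeps
 * whatever was there before (hence the read-write mapping).
 *
 *   Slot contents before: 0xdeadbeef; pushing FS = 0x0028 with a 32-bit operand size:
 *     ESP -= 4; slot contents after: 0xdead0028 (not 0x00000028).
 */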
10449
10450
10451/**
10452 * Pushes a qword onto the stack.
10453 *
10454 * @returns Strict VBox status code.
10455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10456 * @param u64Value The value to push.
10457 */
10458IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10459{
10460 /* Decrement the stack pointer. */
10461 uint64_t uNewRsp;
10462 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10463
10464 /* Write the qword the lazy way. */
10465 uint64_t *pu64Dst;
10466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10467 if (rc == VINF_SUCCESS)
10468 {
10469 *pu64Dst = u64Value;
10470 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10471 }
10472
10473 /* Commit the new RSP value unless an access handler made trouble. */
10474 if (rc == VINF_SUCCESS)
10475 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10476
10477 return rc;
10478}
10479
10480
10481/**
10482 * Pops a word from the stack.
10483 *
10484 * @returns Strict VBox status code.
10485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10486 * @param pu16Value Where to store the popped value.
10487 */
10488IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10489{
10490 /* Increment the stack pointer. */
10491 uint64_t uNewRsp;
10492 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10493
10494 /* Read the word the lazy way. */
10495 uint16_t const *pu16Src;
10496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10497 if (rc == VINF_SUCCESS)
10498 {
10499 *pu16Value = *pu16Src;
10500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10501
10502 /* Commit the new RSP value. */
10503 if (rc == VINF_SUCCESS)
10504 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10505 }
10506
10507 return rc;
10508}
10509
10510
10511/**
10512 * Pops a dword from the stack.
10513 *
10514 * @returns Strict VBox status code.
10515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10516 * @param pu32Value Where to store the popped value.
10517 */
10518IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10519{
10520 /* Increment the stack pointer. */
10521 uint64_t uNewRsp;
10522 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10523
10524 /* Read the dword the lazy way. */
10525 uint32_t const *pu32Src;
10526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10527 if (rc == VINF_SUCCESS)
10528 {
10529 *pu32Value = *pu32Src;
10530 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10531
10532 /* Commit the new RSP value. */
10533 if (rc == VINF_SUCCESS)
10534 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10535 }
10536
10537 return rc;
10538}
10539
10540
10541/**
10542 * Pops a qword from the stack.
10543 *
10544 * @returns Strict VBox status code.
10545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10546 * @param pu64Value Where to store the popped value.
10547 */
10548IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10549{
10550 /* Increment the stack pointer. */
10551 uint64_t uNewRsp;
10552 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10553
10554 /* Read the qword the lazy way. */
10555 uint64_t const *pu64Src;
10556 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10557 if (rc == VINF_SUCCESS)
10558 {
10559 *pu64Value = *pu64Src;
10560 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10561
10562 /* Commit the new RSP value. */
10563 if (rc == VINF_SUCCESS)
10564 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10565 }
10566
10567 return rc;
10568}
10569
10570
10571/**
10572 * Pushes a word onto the stack, using a temporary stack pointer.
10573 *
10574 * @returns Strict VBox status code.
10575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10576 * @param u16Value The value to push.
10577 * @param pTmpRsp Pointer to the temporary stack pointer.
10578 */
10579IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10580{
10581 /* Decrement the stack pointer. */
10582 RTUINT64U NewRsp = *pTmpRsp;
10583 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10584
10585 /* Write the word the lazy way. */
10586 uint16_t *pu16Dst;
10587 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10588 if (rc == VINF_SUCCESS)
10589 {
10590 *pu16Dst = u16Value;
10591 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10592 }
10593
10594 /* Commit the new RSP value unless an access handler made trouble. */
10595 if (rc == VINF_SUCCESS)
10596 *pTmpRsp = NewRsp;
10597
10598 return rc;
10599}
10600
10601
10602/**
10603 * Pushes a dword onto the stack, using a temporary stack pointer.
10604 *
10605 * @returns Strict VBox status code.
10606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10607 * @param u32Value The value to push.
10608 * @param pTmpRsp Pointer to the temporary stack pointer.
10609 */
10610IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10611{
10612 /* Decrement the stack pointer. */
10613 RTUINT64U NewRsp = *pTmpRsp;
10614 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10615
10616 /* Write the dword the lazy way. */
10617 uint32_t *pu32Dst;
10618 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10619 if (rc == VINF_SUCCESS)
10620 {
10621 *pu32Dst = u32Value;
10622 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10623 }
10624
10625 /* Commit the new RSP value unless an access handler made trouble. */
10626 if (rc == VINF_SUCCESS)
10627 *pTmpRsp = NewRsp;
10628
10629 return rc;
10630}
10631
10632
10633/**
10634 * Pushes a qword onto the stack, using a temporary stack pointer.
10635 *
10636 * @returns Strict VBox status code.
10637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10638 * @param u64Value The value to push.
10639 * @param pTmpRsp Pointer to the temporary stack pointer.
10640 */
10641IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10642{
10643 /* Decrement the stack pointer. */
10644 RTUINT64U NewRsp = *pTmpRsp;
10645 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10646
10647 /* Write the qword the lazy way. */
10648 uint64_t *pu64Dst;
10649 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10650 if (rc == VINF_SUCCESS)
10651 {
10652 *pu64Dst = u64Value;
10653 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10654 }
10655
10656 /* Commit the new RSP value unless an access handler made trouble. */
10657 if (rc == VINF_SUCCESS)
10658 *pTmpRsp = NewRsp;
10659
10660 return rc;
10661}
10662
10663
10664/**
10665 * Pops a word from the stack, using a temporary stack pointer.
10666 *
10667 * @returns Strict VBox status code.
10668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10669 * @param pu16Value Where to store the popped value.
10670 * @param pTmpRsp Pointer to the temporary stack pointer.
10671 */
10672IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10673{
10674 /* Increment the stack pointer. */
10675 RTUINT64U NewRsp = *pTmpRsp;
10676 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10677
10678 /* Read the word the lazy way. */
10679 uint16_t const *pu16Src;
10680 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10681 if (rc == VINF_SUCCESS)
10682 {
10683 *pu16Value = *pu16Src;
10684 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10685
10686 /* Commit the new RSP value. */
10687 if (rc == VINF_SUCCESS)
10688 *pTmpRsp = NewRsp;
10689 }
10690
10691 return rc;
10692}
10693
10694
10695/**
10696 * Pops a dword from the stack, using a temporary stack pointer.
10697 *
10698 * @returns Strict VBox status code.
10699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10700 * @param pu32Value Where to store the popped value.
10701 * @param pTmpRsp Pointer to the temporary stack pointer.
10702 */
10703IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10704{
10705 /* Increment the stack pointer. */
10706 RTUINT64U NewRsp = *pTmpRsp;
10707 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10708
10709 /* Read the dword the lazy way. */
10710 uint32_t const *pu32Src;
10711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10712 if (rc == VINF_SUCCESS)
10713 {
10714 *pu32Value = *pu32Src;
10715 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10716
10717 /* Commit the new RSP value. */
10718 if (rc == VINF_SUCCESS)
10719 *pTmpRsp = NewRsp;
10720 }
10721
10722 return rc;
10723}
10724
10725
10726/**
10727 * Pops a qword from the stack, using a temporary stack pointer.
10728 *
10729 * @returns Strict VBox status code.
10730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10731 * @param pu64Value Where to store the popped value.
10732 * @param pTmpRsp Pointer to the temporary stack pointer.
10733 */
10734IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10735{
10736 /* Increment the stack pointer. */
10737 RTUINT64U NewRsp = *pTmpRsp;
10738 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10739
10740    /* Read the qword the lazy way. */
10741 uint64_t const *pu64Src;
10742 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10743 if (rcStrict == VINF_SUCCESS)
10744 {
10745 *pu64Value = *pu64Src;
10746 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10747
10748 /* Commit the new RSP value. */
10749 if (rcStrict == VINF_SUCCESS)
10750 *pTmpRsp = NewRsp;
10751 }
10752
10753 return rcStrict;
10754}
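
/*
 * Illustrative sketch (not part of the build): the *Ex push/pop helpers above
 * take a caller-provided temporary stack pointer, so a multi-step operation
 * only becomes architecturally visible once the caller copies the temporary
 * value back into the guest RSP.  The helpers and CPUMCTX field used below
 * are the real ones from this file; the surrounding control flow is a made-up
 * example.
 *
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;              // start from the current RSP
 *
 *      uint64_t     uFirst, uSecond;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &uFirst, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU64Ex(pVCpu, &uSecond, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;          // commit only if everything worked
 */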
10755
10756
10757/**
10758 * Begin a special stack push (used by interrupts, exceptions and such).
10759 *
10760 * This will raise \#SS or \#PF if appropriate.
10761 *
10762 * @returns Strict VBox status code.
10763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10764 * @param cbMem The number of bytes to push onto the stack.
10765 * @param ppvMem Where to return the pointer to the stack memory.
10766 *                      As with the other memory functions, this could be
10767 *                      direct access or bounce-buffered access, so don't
10768 *                      commit any register changes until the commit call
10769 *                      succeeds.
10770 * @param puNewRsp Where to return the new RSP value. This must be
10771 * passed unchanged to
10772 * iemMemStackPushCommitSpecial().
10773 */
10774IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10775{
10776 Assert(cbMem < UINT8_MAX);
10777 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10778 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10779}
10780
10781
10782/**
10783 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10784 *
10785 * This will update the rSP.
10786 *
10787 * @returns Strict VBox status code.
10788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10789 * @param pvMem The pointer returned by
10790 * iemMemStackPushBeginSpecial().
10791 * @param uNewRsp The new RSP value returned by
10792 * iemMemStackPushBeginSpecial().
10793 */
10794IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10795{
10796 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10797 if (rcStrict == VINF_SUCCESS)
10798 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10799 return rcStrict;
10800}
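
/*
 * Illustrative sketch (not part of the build): how the special push pair is
 * intended to be used, e.g. by exception delivery code.  The two helpers are
 * the real ones defined above; the three-qword frame layout is hypothetical.
 *
 *      uint64_t     uNewRsp;
 *      uint64_t    *pau64Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * 8, (void **)&pau64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                            // #SS/#PF has already been raised
 *      pau64Frame[0] = uErrCode;                       // hypothetical frame contents
 *      pau64Frame[1] = uOldRip;
 *      pau64Frame[2] = uOldCs;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau64Frame, uNewRsp);
 *      // RSP is only updated if the commit succeeds.
 */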
10801
10802
10803/**
10804 * Begin a special stack pop (used by iret, retf and such).
10805 *
10806 * This will raise \#SS or \#PF if appropriate.
10807 *
10808 * @returns Strict VBox status code.
10809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10810 * @param cbMem The number of bytes to pop from the stack.
10811 * @param ppvMem Where to return the pointer to the stack memory.
10812 * @param puNewRsp Where to return the new RSP value. This must be
10813 * assigned to CPUMCTX::rsp manually some time
10814 * after iemMemStackPopDoneSpecial() has been
10815 * called.
10816 */
10817IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10818{
10819 Assert(cbMem < UINT8_MAX);
10820 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10821 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10822}
10823
10824
10825/**
10826 * Continue a special stack pop (used by iret and retf).
10827 *
10828 * This will raise \#SS or \#PF if appropriate.
10829 *
10830 * @returns Strict VBox status code.
10831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10832 * @param cbMem The number of bytes to pop from the stack.
10833 * @param ppvMem Where to return the pointer to the stack memory.
10834 * @param puNewRsp Where to return the new RSP value. This must be
10835 * assigned to CPUMCTX::rsp manually some time
10836 * after iemMemStackPopDoneSpecial() has been
10837 * called.
10838 */
10839IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10840{
10841 Assert(cbMem < UINT8_MAX);
10842 RTUINT64U NewRsp;
10843 NewRsp.u = *puNewRsp;
10844 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10845 *puNewRsp = NewRsp.u;
10846 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10847}
10848
10849
10850/**
10851 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10852 * iemMemStackPopContinueSpecial).
10853 *
10854 * The caller will manually commit the rSP.
10855 *
10856 * @returns Strict VBox status code.
10857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10858 * @param pvMem The pointer returned by
10859 * iemMemStackPopBeginSpecial() or
10860 * iemMemStackPopContinueSpecial().
10861 */
10862IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10863{
10864 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10865}
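
/*
 * Illustrative sketch (not part of the build): the matching special pop
 * sequence for iret/retf style code.  Unlike the push commit above, the new
 * RSP must be written to CPUMCTX::rsp by the caller itself once it is done
 * with the popped values, as the doc comments state.  The two-qword frame
 * layout is hypothetical.
 *
 *      uint64_t        uNewRsp;
 *      uint64_t const *pau64Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 * 8, (void const **)&pau64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const uNewRip = pau64Frame[0];
 *      uint64_t const uNewCs  = pau64Frame[1];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau64Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;           // manual commit by the caller
 */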
10866
10867
10868/**
10869 * Fetches a system table byte.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10873 * @param pbDst Where to return the byte.
10874 * @param iSegReg The index of the segment register to use for
10875 * this access. The base and limits are checked.
10876 * @param GCPtrMem The address of the guest memory.
10877 */
10878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10879{
10880 /* The lazy approach for now... */
10881 uint8_t const *pbSrc;
10882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10883 if (rc == VINF_SUCCESS)
10884 {
10885 *pbDst = *pbSrc;
10886 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10887 }
10888 return rc;
10889}
10890
10891
10892/**
10893 * Fetches a system table word.
10894 *
10895 * @returns Strict VBox status code.
10896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10897 * @param pu16Dst Where to return the word.
10898 * @param iSegReg The index of the segment register to use for
10899 * this access. The base and limits are checked.
10900 * @param GCPtrMem The address of the guest memory.
10901 */
10902IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10903{
10904 /* The lazy approach for now... */
10905 uint16_t const *pu16Src;
10906 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10907 if (rc == VINF_SUCCESS)
10908 {
10909 *pu16Dst = *pu16Src;
10910 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10911 }
10912 return rc;
10913}
10914
10915
10916/**
10917 * Fetches a system table dword.
10918 *
10919 * @returns Strict VBox status code.
10920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10921 * @param pu32Dst Where to return the dword.
10922 * @param iSegReg The index of the segment register to use for
10923 * this access. The base and limits are checked.
10924 * @param GCPtrMem The address of the guest memory.
10925 */
10926IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10927{
10928 /* The lazy approach for now... */
10929 uint32_t const *pu32Src;
10930 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10931 if (rc == VINF_SUCCESS)
10932 {
10933 *pu32Dst = *pu32Src;
10934 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10935 }
10936 return rc;
10937}
10938
10939
10940/**
10941 * Fetches a system table qword.
10942 *
10943 * @returns Strict VBox status code.
10944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10945 * @param pu64Dst Where to return the qword.
10946 * @param iSegReg The index of the segment register to use for
10947 * this access. The base and limits are checked.
10948 * @param GCPtrMem The address of the guest memory.
10949 */
10950IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10951{
10952 /* The lazy approach for now... */
10953 uint64_t const *pu64Src;
10954 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10955 if (rc == VINF_SUCCESS)
10956 {
10957 *pu64Dst = *pu64Src;
10958 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10959 }
10960 return rc;
10961}
10962
10963
10964/**
10965 * Fetches a descriptor table entry with caller specified error code.
10966 *
10967 * @returns Strict VBox status code.
10968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10969 * @param pDesc Where to return the descriptor table entry.
10970 * @param uSel The selector which table entry to fetch.
10971 * @param uXcpt The exception to raise on table lookup error.
10972 * @param uErrorCode The error code associated with the exception.
10973 */
10974IEM_STATIC VBOXSTRICTRC
10975iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10976{
10977 AssertPtr(pDesc);
10978 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10979
10980 /** @todo did the 286 require all 8 bytes to be accessible? */
10981 /*
10982 * Get the selector table base and check bounds.
10983 */
10984 RTGCPTR GCPtrBase;
10985 if (uSel & X86_SEL_LDT)
10986 {
10987 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10988 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10989 {
10990 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10991 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10992 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10993 uErrorCode, 0);
10994 }
10995
10996 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10997 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10998 }
10999 else
11000 {
11001 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11002 {
11003 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11004 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11005 uErrorCode, 0);
11006 }
11007 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11008 }
11009
11010 /*
11011 * Read the legacy descriptor and maybe the long mode extensions if
11012 * required.
11013 */
11014 VBOXSTRICTRC rcStrict;
11015 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11016 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11017 else
11018 {
11019 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11020 if (rcStrict == VINF_SUCCESS)
11021 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11022 if (rcStrict == VINF_SUCCESS)
11023 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11024 if (rcStrict == VINF_SUCCESS)
11025 pDesc->Legacy.au16[3] = 0;
11026 else
11027 return rcStrict;
11028 }
11029
11030 if (rcStrict == VINF_SUCCESS)
11031 {
11032 if ( !IEM_IS_LONG_MODE(pVCpu)
11033 || pDesc->Legacy.Gen.u1DescType)
11034 pDesc->Long.au64[1] = 0;
11035 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11036 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11037 else
11038 {
11039 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11040 /** @todo is this the right exception? */
11041 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11042 }
11043 }
11044 return rcStrict;
11045}
11046
11047
11048/**
11049 * Fetches a descriptor table entry.
11050 *
11051 * @returns Strict VBox status code.
11052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11053 * @param pDesc Where to return the descriptor table entry.
11054 * @param uSel The selector which table entry to fetch.
11055 * @param uXcpt The exception to raise on table lookup error.
11056 */
11057IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11058{
11059 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11060}
11061
11062
11063/**
11064 * Fakes a long mode stack selector for SS = 0.
11065 *
11066 * @param pDescSs Where to return the fake stack descriptor.
11067 * @param uDpl The DPL we want.
11068 */
11069IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11070{
11071 pDescSs->Long.au64[0] = 0;
11072 pDescSs->Long.au64[1] = 0;
11073 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11074 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11075 pDescSs->Long.Gen.u2Dpl = uDpl;
11076 pDescSs->Long.Gen.u1Present = 1;
11077 pDescSs->Long.Gen.u1Long = 1;
11078}
11079
11080
11081/**
11082 * Marks the selector descriptor as accessed (only non-system descriptors).
11083 *
11084 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11085 * will therefore skip the limit checks.
11086 *
11087 * @returns Strict VBox status code.
11088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11089 * @param uSel The selector.
11090 */
11091IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11092{
11093 /*
11094 * Get the selector table base and calculate the entry address.
11095 */
11096 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11097 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11098 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11099 GCPtr += uSel & X86_SEL_MASK;
11100
11101 /*
11102     * ASMAtomicBitSet will assert if the address is misaligned, so do some
11103     * ugly stuff to avoid that. This ensures the access is atomic and more
11104     * or less removes any question about 8-bit vs 32-bit accesses.
11105 */
11106 VBOXSTRICTRC rcStrict;
11107 uint32_t volatile *pu32;
11108 if ((GCPtr & 3) == 0)
11109 {
11110        /* The normal case: map the dword containing the accessed bit (bit 40 of the descriptor). */
11111 GCPtr += 2 + 2;
11112 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11113 if (rcStrict != VINF_SUCCESS)
11114 return rcStrict;
11115        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11116 }
11117 else
11118 {
11119 /* The misaligned GDT/LDT case, map the whole thing. */
11120 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11121 if (rcStrict != VINF_SUCCESS)
11122 return rcStrict;
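        /* The accessed flag is bit 40 of the 8-byte descriptor.  Bump the base
           pointer up to the next 4-byte boundary and rebase the bit index by
           8 bits for each byte skipped, so the 32-bit access performed by
           ASMAtomicBitSet is always aligned. */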
11123 switch ((uintptr_t)pu32 & 3)
11124 {
11125 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11126 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11127 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11128 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11129 }
11130 }
11131
11132 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11133}
11134
11135/** @} */
11136
11137
11138/*
11139 * Include the C/C++ implementation of instruction.
11140 */
11141#include "IEMAllCImpl.cpp.h"
11142
11143
11144
11145/** @name "Microcode" macros.
11146 *
11147 * The idea is that we should be able to use the same code for the
11148 * interpreter as well as for a recompiler. Thus this obfuscation.
11149 *
11150 * @{
11151 */
11152#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11153#define IEM_MC_END() }
11154#define IEM_MC_PAUSE() do {} while (0)
11155#define IEM_MC_CONTINUE() do {} while (0)
11156
11157/** Internal macro. */
11158#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11159 do \
11160 { \
11161 VBOXSTRICTRC rcStrict2 = a_Expr; \
11162 if (rcStrict2 != VINF_SUCCESS) \
11163 return rcStrict2; \
11164 } while (0)
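
/*
 * Illustrative sketch (not part of the build): an instruction implementation
 * elsewhere in IEM is written purely in terms of these IEM_MC_* macros, which
 * currently expand to the plain C statements defined in this section but
 * could equally be turned into recompiler output.  Something like a 16-bit
 * register-to-register move could be expressed as (register indexes assumed
 * to come from the decoded ModR/M byte):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */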
11165
11166
11167#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11168#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11169#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11170#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11171#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11172#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11173#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11174#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11175#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11176 do { \
11177 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11178 return iemRaiseDeviceNotAvailable(pVCpu); \
11179 } while (0)
11180#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11181 do { \
11182 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11183 return iemRaiseDeviceNotAvailable(pVCpu); \
11184 } while (0)
11185#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11186 do { \
11187 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11188 return iemRaiseMathFault(pVCpu); \
11189 } while (0)
11190#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11191 do { \
11192 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11193 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11194 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11195 return iemRaiseUndefinedOpcode(pVCpu); \
11196 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11197 return iemRaiseDeviceNotAvailable(pVCpu); \
11198 } while (0)
11199#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11200 do { \
11201 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11202 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11203 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11204 return iemRaiseUndefinedOpcode(pVCpu); \
11205 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11206 return iemRaiseDeviceNotAvailable(pVCpu); \
11207 } while (0)
11208#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11209 do { \
11210 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11211 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11212 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11213 return iemRaiseUndefinedOpcode(pVCpu); \
11214 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11215 return iemRaiseDeviceNotAvailable(pVCpu); \
11216 } while (0)
11217#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11218 do { \
11219 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11220 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11221 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11222 return iemRaiseUndefinedOpcode(pVCpu); \
11223 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11224 return iemRaiseDeviceNotAvailable(pVCpu); \
11225 } while (0)
11226#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11227 do { \
11228 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11229 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11230 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11231 return iemRaiseUndefinedOpcode(pVCpu); \
11232 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11233 return iemRaiseDeviceNotAvailable(pVCpu); \
11234 } while (0)
11235#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11236 do { \
11237 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11238 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11239 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11240 return iemRaiseUndefinedOpcode(pVCpu); \
11241 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11242 return iemRaiseDeviceNotAvailable(pVCpu); \
11243 } while (0)
11244#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11245 do { \
11246 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11247 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11248 return iemRaiseUndefinedOpcode(pVCpu); \
11249 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11250 return iemRaiseDeviceNotAvailable(pVCpu); \
11251 } while (0)
11252#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11253 do { \
11254 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11255 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11256 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11257 return iemRaiseUndefinedOpcode(pVCpu); \
11258 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11259 return iemRaiseDeviceNotAvailable(pVCpu); \
11260 } while (0)
11261#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11262 do { \
11263 if (pVCpu->iem.s.uCpl != 0) \
11264 return iemRaiseGeneralProtectionFault0(pVCpu); \
11265 } while (0)
11266#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11267 do { \
11268 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11269 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11270 } while (0)
11271#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11272 do { \
11273 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11274 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11275 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11276 return iemRaiseUndefinedOpcode(pVCpu); \
11277 } while (0)
11278#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11279 do { \
11280 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11281 return iemRaiseGeneralProtectionFault0(pVCpu); \
11282 } while (0)
11283
11284
11285#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11286#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11287#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11288#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11289#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11290#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11291#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11292 uint32_t a_Name; \
11293 uint32_t *a_pName = &a_Name
11294#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11295 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11296
11297#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11298#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11299
11300#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11317#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11318 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11319 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11320 } while (0)
11321#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11322 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11323 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11324 } while (0)
11325#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11326 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11327 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11328 } while (0)
11329/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11330#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11331 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11332 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11333 } while (0)
11334#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11335 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11336 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11337 } while (0)
11338/** @note Not for IOPL or IF testing or modification. */
11339#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11340#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11341#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11342#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11343
11344#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11345#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11346#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11347#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11348#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11349#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11350#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11351#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11352#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11353#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11354/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11355#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11356 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11357 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11358 } while (0)
11359#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11360 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11361 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11362 } while (0)
11363#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11364 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11365
11366
11367#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11368#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11369/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11370 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11371#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11372#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11373/** @note Not for IOPL or IF testing or modification. */
11374#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11375
11376#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11377#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11378#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11379 do { \
11380 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11381 *pu32Reg += (a_u32Value); \
11382        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11383 } while (0)
11384#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11385
11386#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11387#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11388#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11389 do { \
11390 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11391 *pu32Reg -= (a_u32Value); \
11392        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11393 } while (0)
11394#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11395#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11396
11397#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11398#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11399#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11400#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11401#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11402#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11403#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11404
11405#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11406#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11407#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11408#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11409
11410#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11411#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11412#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11413
11414#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11415#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11416#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11417
11418#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11419#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11420#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11421
11422#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11423#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11424#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11425
11426#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11427
11428#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11429
11430#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11431#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11432#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11433 do { \
11434 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11435 *pu32Reg &= (a_u32Value); \
11436        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11437 } while (0)
11438#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11439
11440#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11441#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11442#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11443 do { \
11444 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11445 *pu32Reg |= (a_u32Value); \
11446        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11447 } while (0)
11448#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11449
11450
11451/** @note Not for IOPL or IF modification. */
11452#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11453/** @note Not for IOPL or IF modification. */
11454#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11455/** @note Not for IOPL or IF modification. */
11456#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11457
11458#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11459
11460/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11461#define IEM_MC_FPU_TO_MMX_MODE() do { \
11462 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11463 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11464 } while (0)
11465
11466/** Switches the FPU state from MMX mode (FTW=0xffff). */
11467#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11468 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11469 } while (0)
11470
11471#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11472 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11473#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11474 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11475#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11476 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11477 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11478 } while (0)
11479#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11480 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11481 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11482 } while (0)
11483#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11484 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11485#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11486 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11487#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11488 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11489
11490#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11491 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11492 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11493 } while (0)
11494#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11495 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11496#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11497 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11498#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11499 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11500#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11501 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11502 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11503 } while (0)
11504#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11505 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11506#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11507 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11508 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11509 } while (0)
11510#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11511 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11512#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11513 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11514 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11515 } while (0)
11516#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11517 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11518#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11519 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11520#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11521 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11522#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11523 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11524#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11525 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11526 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11527 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11528 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11529 } while (0)
11530
11531#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11532 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11533 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11534 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11535 } while (0)
11536#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11539 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11540 } while (0)
11541#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11542 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11543 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11544 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11545 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11546 } while (0)
11547#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11550 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11551 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11552 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11553 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11554 } while (0)
11555
11556#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11557#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11558 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11559 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11560 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11561 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11563 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11565 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11566 } while (0)
11567#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11568 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11569 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11570 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11571 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11572 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11573 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11574 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11575 } while (0)
11576#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11577 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11578 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11579 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11580 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11581 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11583 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11584 } while (0)
11585#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11586 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11587 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11588 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11589 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11590 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11591 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11592 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11593 } while (0)
11594
11595#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11596 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11597#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11598 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11599#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11600 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11601#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11602 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11603 uintptr_t const iYRegTmp = (a_iYReg); \
11604 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11605 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11606 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11607 } while (0)
11608
11609#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11610 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11611 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11612 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11613 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11614 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11615 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11616 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11617 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11618 } while (0)
11619#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11620 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11621 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11622 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11623 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11625 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11626 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11627 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11628 } while (0)
11629#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11630 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11631 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11632 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11633 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11634 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11635 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11636 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11637 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11638 } while (0)
11639
11640#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11641 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11642 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11643 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11644 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11645 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11646 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11647 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11648 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11649 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11650 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11651 } while (0)
11652#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11653 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11654 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11655 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11656 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11657 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11658 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11659 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11660 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11661 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11662 } while (0)
11663#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11664 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11665 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11666 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11667 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11668 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11669 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11670 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11671 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11672 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11673 } while (0)
11674#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11675 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11676 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11677 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11678 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11679 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11680 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11681 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11682 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11683 } while (0)
11684
11685#ifndef IEM_WITH_SETJMP
11686# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11688# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11690# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11692#else
11693# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11694 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11695# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11696 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11697# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11698 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11699#endif
11700
11701#ifndef IEM_WITH_SETJMP
11702# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11704# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11706# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11708#else
11709# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11710 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11711# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11712 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11713# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11714 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11715#endif
11716
11717#ifndef IEM_WITH_SETJMP
11718# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11720# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11722# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11724#else
11725# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11726 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11727# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11728 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11729# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11730 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11731#endif
11732
11733#ifdef SOME_UNUSED_FUNCTION
11734# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11736#endif
11737
11738#ifndef IEM_WITH_SETJMP
11739# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11743# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11747#else
11748# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11751 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11752# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756#endif
11757
11758#ifndef IEM_WITH_SETJMP
11759# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11765#else
11766# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11767 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11768# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11769 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11770# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11771 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11772#endif
11773
11774#ifndef IEM_WITH_SETJMP
11775# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11777# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11779#else
11780# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11781 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11782# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11783 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11784#endif
11785
11786#ifndef IEM_WITH_SETJMP
11787# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11789# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11791#else
11792# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11793 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11794# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11795 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11796#endif
11797
11798
11799
11800#ifndef IEM_WITH_SETJMP
11801# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11802 do { \
11803 uint8_t u8Tmp; \
11804 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11805 (a_u16Dst) = u8Tmp; \
11806 } while (0)
11807# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11808 do { \
11809 uint8_t u8Tmp; \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11811 (a_u32Dst) = u8Tmp; \
11812 } while (0)
11813# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11814 do { \
11815 uint8_t u8Tmp; \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11817 (a_u64Dst) = u8Tmp; \
11818 } while (0)
11819# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11820 do { \
11821 uint16_t u16Tmp; \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11823 (a_u32Dst) = u16Tmp; \
11824 } while (0)
11825# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11826 do { \
11827 uint16_t u16Tmp; \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11829 (a_u64Dst) = u16Tmp; \
11830 } while (0)
11831# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11832 do { \
11833 uint32_t u32Tmp; \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11835 (a_u64Dst) = u32Tmp; \
11836 } while (0)
11837#else /* IEM_WITH_SETJMP */
11838# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11839 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11840# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11841 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11842# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11843 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11844# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850#endif /* IEM_WITH_SETJMP */
11851
11852#ifndef IEM_WITH_SETJMP
11853# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11854 do { \
11855 uint8_t u8Tmp; \
11856 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11857 (a_u16Dst) = (int8_t)u8Tmp; \
11858 } while (0)
11859# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11860 do { \
11861 uint8_t u8Tmp; \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11863 (a_u32Dst) = (int8_t)u8Tmp; \
11864 } while (0)
11865# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11866 do { \
11867 uint8_t u8Tmp; \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11869 (a_u64Dst) = (int8_t)u8Tmp; \
11870 } while (0)
11871# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11872 do { \
11873 uint16_t u16Tmp; \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11875 (a_u32Dst) = (int16_t)u16Tmp; \
11876 } while (0)
11877# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11878 do { \
11879 uint16_t u16Tmp; \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11881 (a_u64Dst) = (int16_t)u16Tmp; \
11882 } while (0)
11883# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11884 do { \
11885 uint32_t u32Tmp; \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11887 (a_u64Dst) = (int32_t)u32Tmp; \
11888 } while (0)
11889#else /* IEM_WITH_SETJMP */
11890# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11891 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11892# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11893 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11894# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11895 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11896# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11897 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11898# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11899 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11900# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11901 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11902#endif /* IEM_WITH_SETJMP */
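
/*
 * Usage sketch (illustrative, not lifted from the decoder sources): the _ZX_
 * variants zero extend the fetched value into the wider destination, while
 * the _SX_ variants sign extend it via the intermediate cast, e.g. for
 * movzx/movsx style operands.  Assumes a local set up with the IEM_MC_LOCAL
 * machinery defined earlier in this file and a GCPtrEffSrc effective address:
 *
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); // 0xff -> 0x000000ff
 *      IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); // 0xff -> 0xffffffff
 */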
11903
11904#ifndef IEM_WITH_SETJMP
11905# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11907# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11908 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11909# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11911# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11913#else
11914# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11915 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11916# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11917 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11918# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11919 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11920# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11921 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11922#endif
11923
11924#ifndef IEM_WITH_SETJMP
11925# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11926 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11927# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11928 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11929# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11931# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11933#else
11934# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11935 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11936# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11937 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11938# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11939 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11940# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11941 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11942#endif
11943
11944#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11945#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11946#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11947#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11948#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11949#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11950#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11951 do { \
11952 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11953 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11954 } while (0)
11955
11956#ifndef IEM_WITH_SETJMP
11957# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11959# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11961#else
11962# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11963 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11964# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11965 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11966#endif
11967
11968#ifndef IEM_WITH_SETJMP
11969# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11970 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11971# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11972 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11973#else
11974# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11975 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11976# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11977 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11978#endif
11979
11980
11981#define IEM_MC_PUSH_U16(a_u16Value) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11983#define IEM_MC_PUSH_U32(a_u32Value) \
11984 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11985#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11987#define IEM_MC_PUSH_U64(a_u64Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11989
11990#define IEM_MC_POP_U16(a_pu16Value) \
11991 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11992#define IEM_MC_POP_U32(a_pu32Value) \
11993 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11994#define IEM_MC_POP_U64(a_pu64Value) \
11995 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11996
11997/** Maps guest memory for direct or bounce buffered access.
11998 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11999 * @remarks May return.
12000 */
12001#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12002 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12003
12004/** Maps guest memory for direct or bounce buffered access.
12005 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12006 * @remarks May return.
12007 */
12008#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12009 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12010
12011/** Commits the memory and unmaps the guest memory.
12012 * @remarks May return.
12013 */
12014#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12015 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12016
12017/** Commits the memory and unmaps the guest memory, unless the FPU status word
12018 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12019 * would cause FLD not to store.
12020 *
12021 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12022 * store, while \#P will not.
12023 *
12024 * @remarks May in theory return - for now.
12025 */
12026#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12027 do { \
12028 if ( !(a_u16FSW & X86_FSW_ES) \
12029 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12030 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12031 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12032 } while (0)
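
/*
 * Usage sketch (illustrative only): a read-modify-write memory operand is
 * typically mapped, handed to the operand worker and then committed:
 *
 *      uint16_t *pu16Dst;
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      ... operate on *pu16Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *
 * The IEM_ACCESS_DATA_RW flag and the GCPtrEffDst local are assumed to come
 * from the surrounding decoder block; both macros may return on failure, so
 * they must only appear where an early return is acceptable.
 */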
12033
12034/** Calculate the effective address from R/M. */
12035#ifndef IEM_WITH_SETJMP
12036# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12037 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12038#else
12039# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12040 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12041#endif
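
/*
 * Usage sketch (illustrative): the decoder hands over the ModR/M byte and the
 * number of immediate bytes still to come, which matters for RIP relative
 * addressing in 64-bit mode (see iemOpHlpCalcRmEffAddr below):
 *
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *
 * IEM_MC_LOCAL and IEM_MC_FETCH_MEM_U32 are assumed to be the helpers defined
 * earlier in this file.
 */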
12042
12043#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12044#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12045#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12046#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12047#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12048#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12049#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12050
12051/**
12052 * Defers the rest of the instruction emulation to a C implementation routine
12053 * and returns, only taking the standard parameters.
12054 *
12055 * @param a_pfnCImpl The pointer to the C routine.
12056 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12057 */
12058#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12059
12060/**
12061 * Defers the rest of instruction emulation to a C implementation routine and
12062 * returns, taking one argument in addition to the standard ones.
12063 *
12064 * @param a_pfnCImpl The pointer to the C routine.
12065 * @param a0 The argument.
12066 */
12067#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12068
12069/**
12070 * Defers the rest of the instruction emulation to a C implementation routine
12071 * and returns, taking two arguments in addition to the standard ones.
12072 *
12073 * @param a_pfnCImpl The pointer to the C routine.
12074 * @param a0 The first extra argument.
12075 * @param a1 The second extra argument.
12076 */
12077#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12078
12079/**
12080 * Defers the rest of the instruction emulation to a C implementation routine
12081 * and returns, taking three arguments in addition to the standard ones.
12082 *
12083 * @param a_pfnCImpl The pointer to the C routine.
12084 * @param a0 The first extra argument.
12085 * @param a1 The second extra argument.
12086 * @param a2 The third extra argument.
12087 */
12088#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12089
12090/**
12091 * Defers the rest of the instruction emulation to a C implementation routine
12092 * and returns, taking four arguments in addition to the standard ones.
12093 *
12094 * @param a_pfnCImpl The pointer to the C routine.
12095 * @param a0 The first extra argument.
12096 * @param a1 The second extra argument.
12097 * @param a2 The third extra argument.
12098 * @param a3 The fourth extra argument.
12099 */
12100#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12101
12102/**
12103 * Defers the rest of the instruction emulation to a C implementation routine
12104 * and returns, taking five arguments in addition to the standard ones.
12105 *
12106 * @param a_pfnCImpl The pointer to the C routine.
12107 * @param a0 The first extra argument.
12108 * @param a1 The second extra argument.
12109 * @param a2 The third extra argument.
12110 * @param a3 The fourth extra argument.
12111 * @param a4 The fifth extra argument.
12112 */
12113#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
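
/*
 * Usage sketch (illustrative): the IEM_MC_CALL_CIMPL_N macros end the
 * microcode block by returning whatever the C implementation returns, so they
 * are the last statement before IEM_MC_END():
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_CALL_CIMPL_0(iemCImpl_HypotheticalWorker);
 *      IEM_MC_END();
 *
 * iemCImpl_HypotheticalWorker is a placeholder for a routine defined with the
 * IEM_CIMPL_DEF_0 convention mentioned above; IEM_MC_BEGIN/IEM_MC_END are
 * defined earlier in this file.
 */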
12114
12115/**
12116 * Defers the entire instruction emulation to a C implementation routine and
12117 * returns, only taking the standard parameters.
12118 *
12119 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12120 *
12121 * @param a_pfnCImpl The pointer to the C routine.
12122 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12123 */
12124#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12125
12126/**
12127 * Defers the entire instruction emulation to a C implementation routine and
12128 * returns, taking one argument in addition to the standard ones.
12129 *
12130 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12131 *
12132 * @param a_pfnCImpl The pointer to the C routine.
12133 * @param a0 The argument.
12134 */
12135#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12136
12137/**
12138 * Defers the entire instruction emulation to a C implementation routine and
12139 * returns, taking two arguments in addition to the standard ones.
12140 *
12141 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12142 *
12143 * @param a_pfnCImpl The pointer to the C routine.
12144 * @param a0 The first extra argument.
12145 * @param a1 The second extra argument.
12146 */
12147#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12148
12149/**
12150 * Defers the entire instruction emulation to a C implementation routine and
12151 * returns, taking three arguments in addition to the standard ones.
12152 *
12153 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12154 *
12155 * @param a_pfnCImpl The pointer to the C routine.
12156 * @param a0 The first extra argument.
12157 * @param a1 The second extra argument.
12158 * @param a2 The third extra argument.
12159 */
12160#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
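
/*
 * Usage sketch (illustrative): unlike IEM_MC_CALL_CIMPL_N, the defer variants
 * are used directly from the opcode decoder function without any
 * IEM_MC_BEGIN/IEM_MC_END block:
 *
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_HypotheticalWorker);
 *
 * The worker name is again a placeholder for an IEM_CIMPL_DEF_0 routine.
 */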
12161
12162/**
12163 * Calls a FPU assembly implementation taking one visible argument.
12164 *
12165 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12166 * @param a0 The first extra argument.
12167 */
12168#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12169 do { \
12170 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12171 } while (0)
12172
12173/**
12174 * Calls a FPU assembly implementation taking two visible arguments.
12175 *
12176 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12177 * @param a0 The first extra argument.
12178 * @param a1 The second extra argument.
12179 */
12180#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12181 do { \
12182 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12183 } while (0)
12184
12185/**
12186 * Calls a FPU assembly implementation taking three visible arguments.
12187 *
12188 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12189 * @param a0 The first extra argument.
12190 * @param a1 The second extra argument.
12191 * @param a2 The third extra argument.
12192 */
12193#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12194 do { \
12195 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12196 } while (0)
12197
12198#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12199 do { \
12200 (a_FpuData).FSW = (a_FSW); \
12201 (a_FpuData).r80Result = *(a_pr80Value); \
12202 } while (0)
12203
12204/** Pushes FPU result onto the stack. */
12205#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12206 iemFpuPushResult(pVCpu, &a_FpuData)
12207/** Pushes FPU result onto the stack and sets the FPUDP. */
12208#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12209 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12210
12211/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12212#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12213 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12214
12215/** Stores FPU result in a stack register. */
12216#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12217 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12218/** Stores FPU result in a stack register and pops the stack. */
12219#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12220 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12221/** Stores FPU result in a stack register and sets the FPUDP. */
12222#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12223 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12224/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12225 * stack. */
12226#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12227 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
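
/*
 * Usage sketch (illustrative): an FPU instruction typically lets the assembly
 * worker fill in an IEMFPURESULT local and then commits it with one of the
 * store macros:
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      ...
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, &FpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, 0);
 *
 * IEMFPURESULT (an FSW plus an 80-bit result, cf. IEM_MC_SET_FPU_RESULT above)
 * and IEM_MC_LOCAL are defined elsewhere in IEM; the worker and operand names
 * are placeholders.
 */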
12228
12229/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12230#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12231 iemFpuUpdateOpcodeAndIp(pVCpu)
12232/** Free a stack register (for FFREE and FFREEP). */
12233#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12234 iemFpuStackFree(pVCpu, a_iStReg)
12235/** Increment the FPU stack pointer. */
12236#define IEM_MC_FPU_STACK_INC_TOP() \
12237 iemFpuStackIncTop(pVCpu)
12238/** Decrement the FPU stack pointer. */
12239#define IEM_MC_FPU_STACK_DEC_TOP() \
12240 iemFpuStackDecTop(pVCpu)
12241
12242/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12243#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12244 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12245/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12246#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12247 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12248/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12249#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12250 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12251/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12252#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12253 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12254/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12255 * stack. */
12256#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12257 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12258/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12259#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12260 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12261
12262/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12263#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12264 iemFpuStackUnderflow(pVCpu, a_iStDst)
12265/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12266 * stack. */
12267#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12268 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12269/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12270 * FPUDS. */
12271#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12272 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12273/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12274 * FPUDS. Pops stack. */
12275#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12276 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12277/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12278 * stack twice. */
12279#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12280 iemFpuStackUnderflowThenPopPop(pVCpu)
12281/** Raises a FPU stack underflow exception for an instruction pushing a result
12282 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12283#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12284 iemFpuStackPushUnderflow(pVCpu)
12285/** Raises a FPU stack underflow exception for an instruction pushing a result
12286 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12287#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12288 iemFpuStackPushUnderflowTwo(pVCpu)
12289
12290/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12291 * FPUIP, FPUCS and FOP. */
12292#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12293 iemFpuStackPushOverflow(pVCpu)
12294/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12295 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12296#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12297 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12298/** Prepares for using the FPU state.
12299 * Ensures that we can use the host FPU in the current context (RC+R0).
12300 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12301#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12302/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12303#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12304/** Actualizes the guest FPU state so it can be accessed and modified. */
12305#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12306
12307/** Prepares for using the SSE state.
12308 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12309 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12310#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12311/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12312#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12313/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12314#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12315
12316/** Prepares for using the AVX state.
12317 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12318 * Ensures the guest AVX state in the CPUMCTX is up to date.
12319 * @note This will include the AVX512 state too when support for it is added
12320 * due to the zero-extending feature of VEX instructions. */
12321#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12322/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12323#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12324/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12325#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
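
/*
 * Ordering sketch (illustrative): the ACTUALIZE variants are used when the
 * microcode itself reads or writes the registers, e.g. (placeholder names):
 *
 *      IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
 *      IEM_MC_STORE_XREG_U32(iXRegDst, u32Src);
 *
 * IEM_MC_STORE_XREG_U32 is assumed to be one of the XMM register helpers
 * defined earlier in this file.  The PREPARE variants, by contrast, are
 * invoked before calling an assembly worker; the IEM_MC_CALL_MMX/SSE/AVX
 * AIMPL macros below already do this themselves.
 */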
12326
12327/**
12328 * Calls a MMX assembly implementation taking two visible arguments.
12329 *
12330 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12331 * @param a0 The first extra argument.
12332 * @param a1 The second extra argument.
12333 */
12334#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12335 do { \
12336 IEM_MC_PREPARE_FPU_USAGE(); \
12337 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12338 } while (0)
12339
12340/**
12341 * Calls a MMX assembly implementation taking three visible arguments.
12342 *
12343 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12344 * @param a0 The first extra argument.
12345 * @param a1 The second extra argument.
12346 * @param a2 The third extra argument.
12347 */
12348#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12349 do { \
12350 IEM_MC_PREPARE_FPU_USAGE(); \
12351 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12352 } while (0)
12353
12354
12355/**
12356 * Calls a SSE assembly implementation taking two visible arguments.
12357 *
12358 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12359 * @param a0 The first extra argument.
12360 * @param a1 The second extra argument.
12361 */
12362#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12363 do { \
12364 IEM_MC_PREPARE_SSE_USAGE(); \
12365 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12366 } while (0)
12367
12368/**
12369 * Calls a SSE assembly implementation taking three visible arguments.
12370 *
12371 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12372 * @param a0 The first extra argument.
12373 * @param a1 The second extra argument.
12374 * @param a2 The third extra argument.
12375 */
12376#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12377 do { \
12378 IEM_MC_PREPARE_SSE_USAGE(); \
12379 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12380 } while (0)
12381
12382
12383/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12384 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12385#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12386 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12387
12388/**
12389 * Calls a AVX assembly implementation taking two visible arguments.
12390 *
12391 * There is one implicit zero'th argument, a pointer to the extended state.
12392 *
12393 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12394 * @param a1 The first extra argument.
12395 * @param a2 The second extra argument.
12396 */
12397#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12398 do { \
12399 IEM_MC_PREPARE_AVX_USAGE(); \
12400 a_pfnAImpl(pXState, (a1), (a2)); \
12401 } while (0)
12402
12403/**
12404 * Calls a AVX assembly implementation taking three visible arguments.
12405 *
12406 * There is one implicit zero'th argument, a pointer to the extended state.
12407 *
12408 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12409 * @param a1 The first extra argument.
12410 * @param a2 The second extra argument.
12411 * @param a3 The third extra argument.
12412 */
12413#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12414 do { \
12415 IEM_MC_PREPARE_AVX_USAGE(); \
12416 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12417 } while (0)
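
/*
 * Usage sketch (illustrative): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares the
 * pXState pointer that the two call macros above reference implicitly:
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *      ...
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, puDst, puSrc);
 *
 * IEM_MC_ARG and the RTUINT256U pointer types are assumed to be the helpers
 * defined earlier in this file and in IPRT respectively.
 */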
12418
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12421/** @note Not for IOPL or IF testing. */
12422#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12429 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12430 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12433 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12434 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12437 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12438 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12439 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12440/** @note Not for IOPL or IF testing. */
12441#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12442 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12443 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12444 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12445#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12446#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12447#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12448/** @note Not for IOPL or IF testing. */
12449#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12450 if ( pVCpu->cpum.GstCtx.cx != 0 \
12451 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12452/** @note Not for IOPL or IF testing. */
12453#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12454 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12455 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12456/** @note Not for IOPL or IF testing. */
12457#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12458 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12459 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12460/** @note Not for IOPL or IF testing. */
12461#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12462 if ( pVCpu->cpum.GstCtx.cx != 0 \
12463 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12464/** @note Not for IOPL or IF testing. */
12465#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12466 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12467 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12468/** @note Not for IOPL or IF testing. */
12469#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12470 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12471 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12472#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12473#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12474
12475#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12476 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12477#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12478 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12479#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12480 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12481#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12482 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12483#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12484 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12485#define IEM_MC_IF_FCW_IM() \
12486 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12487
12488#define IEM_MC_ELSE() } else {
12489#define IEM_MC_ENDIF() } do {} while (0)
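
/*
 * Usage sketch (illustrative): the IEM_MC_IF_XXX macros open a plain C "if"
 * which IEM_MC_ELSE()/IEM_MC_ENDIF() close again, e.g. a Jcc style sequence:
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *
 * IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are assumed to be the helpers
 * defined earlier in this file; the trailing "do {} while (0)" in
 * IEM_MC_ENDIF merely swallows the semicolon at the call site.
 */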
12490
12491/** @} */
12492
12493
12494/** @name Opcode Debug Helpers.
12495 * @{
12496 */
12497#ifdef VBOX_WITH_STATISTICS
12498# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12499#else
12500# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12501#endif
12502
12503#ifdef DEBUG
12504# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12505 do { \
12506 IEMOP_INC_STATS(a_Stats); \
12507 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12508 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12509 } while (0)
12510
12511# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12512 do { \
12513 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12514 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12515 (void)RT_CONCAT(OP_,a_Upper); \
12516 (void)(a_fDisHints); \
12517 (void)(a_fIemHints); \
12518 } while (0)
12519
12520# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12521 do { \
12522 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12523 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12524 (void)RT_CONCAT(OP_,a_Upper); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12526 (void)(a_fDisHints); \
12527 (void)(a_fIemHints); \
12528 } while (0)
12529
12530# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12531 do { \
12532 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12533 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12534 (void)RT_CONCAT(OP_,a_Upper); \
12535 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12536 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12537 (void)(a_fDisHints); \
12538 (void)(a_fIemHints); \
12539 } while (0)
12540
12541# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12542 do { \
12543 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12544 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12545 (void)RT_CONCAT(OP_,a_Upper); \
12546 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12547 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12548 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12549 (void)(a_fDisHints); \
12550 (void)(a_fIemHints); \
12551 } while (0)
12552
12553# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12554 do { \
12555 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12556 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12557 (void)RT_CONCAT(OP_,a_Upper); \
12558 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12559 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12560 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12561 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12562 (void)(a_fDisHints); \
12563 (void)(a_fIemHints); \
12564 } while (0)
12565
12566#else
12567# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12568
12569# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12571# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12572 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12573# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12574 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12575# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12577# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12579
12580#endif
12581
12582#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12583 IEMOP_MNEMONIC0EX(a_Lower, \
12584 #a_Lower, \
12585 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12586#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12587 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12588 #a_Lower " " #a_Op1, \
12589 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12590#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12591 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12592 #a_Lower " " #a_Op1 "," #a_Op2, \
12593 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12594#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12595 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12596 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12597 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12598#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12599 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12600 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12601 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
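
/*
 * Usage sketch (illustrative): a typical decoder names the instruction and
 * its operand forms up front, feeding both the statistics counter and the
 * Log4 decode line above, e.g.:
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * The MR/Eb/Gb/DISOPTYPE_HARMLESS tokens stand for members of the IEMOPFORM_,
 * OP_PARM_ and DISOPTYPE_ families referenced by the EX macros and will
 * differ per instruction.
 */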
12602
12603/** @} */
12604
12605
12606/** @name Opcode Helpers.
12607 * @{
12608 */
12609
12610#ifdef IN_RING3
12611# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12612 do { \
12613 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12614 else \
12615 { \
12616 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12617 return IEMOP_RAISE_INVALID_OPCODE(); \
12618 } \
12619 } while (0)
12620#else
12621# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12622 do { \
12623 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12624 else return IEMOP_RAISE_INVALID_OPCODE(); \
12625 } while (0)
12626#endif
12627
12628/** The instruction requires a 186 or later. */
12629#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12630# define IEMOP_HLP_MIN_186() do { } while (0)
12631#else
12632# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12633#endif
12634
12635/** The instruction requires a 286 or later. */
12636#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12637# define IEMOP_HLP_MIN_286() do { } while (0)
12638#else
12639# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12640#endif
12641
12642/** The instruction requires a 386 or later. */
12643#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12644# define IEMOP_HLP_MIN_386() do { } while (0)
12645#else
12646# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12647#endif
12648
12649/** The instruction requires a 386 or later if the given expression is true. */
12650#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12651# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12652#else
12653# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12654#endif
12655
12656/** The instruction requires a 486 or later. */
12657#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12658# define IEMOP_HLP_MIN_486() do { } while (0)
12659#else
12660# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12661#endif
12662
12663/** The instruction requires a Pentium (586) or later. */
12664#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12665# define IEMOP_HLP_MIN_586() do { } while (0)
12666#else
12667# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12668#endif
12669
12670/** The instruction requires a PentiumPro (686) or later. */
12671#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12672# define IEMOP_HLP_MIN_686() do { } while (0)
12673#else
12674# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12675#endif
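
/*
 * Usage sketch (illustrative): decoders for instructions introduced after the
 * 8086 simply start with the matching helper, e.g. for a 486+ instruction:
 *
 *      IEMOP_HLP_MIN_486();
 *
 * When IEM_CFG_TARGET_CPU already guarantees the minimum, the helper compiles
 * to an empty statement; otherwise it raises \#UD for older target CPUs.
 */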
12676
12677
12678/** The instruction raises an \#UD in real and V8086 mode. */
12679#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12680 do \
12681 { \
12682 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12683 else return IEMOP_RAISE_INVALID_OPCODE(); \
12684 } while (0)
12685
12686#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12687/** This instruction raises an \#UD in real and V8086 mode, or when in long
12688 * mode without a 64-bit code segment (applicable to all VMX instructions
12689 * except VMCALL).
12690 */
12691#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12692 do \
12693 { \
12694 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12695 && ( !IEM_IS_LONG_MODE(pVCpu) \
12696 || IEM_IS_64BIT_CODE(pVCpu))) \
12697 { /* likely */ } \
12698 else \
12699 { \
12700 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12701 { \
12702 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12703 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12704 return IEMOP_RAISE_INVALID_OPCODE(); \
12705 } \
12706 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12707 { \
12708 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12709 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12710 return IEMOP_RAISE_INVALID_OPCODE(); \
12711 } \
12712 } \
12713 } while (0)
12714
12715/** The instruction can only be executed in VMX operation (VMX root mode and
12716 * non-root mode).
12717 *
12718 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12719 */
12720# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12721 do \
12722 { \
12723 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12724 else \
12725 { \
12726 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12727 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12728 return IEMOP_RAISE_INVALID_OPCODE(); \
12729 } \
12730 } while (0)
12731#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12732
12733/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12734 * 64-bit mode. */
12735#define IEMOP_HLP_NO_64BIT() \
12736 do \
12737 { \
12738 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12739 return IEMOP_RAISE_INVALID_OPCODE(); \
12740 } while (0)
12741
12742/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12743 * 64-bit mode. */
12744#define IEMOP_HLP_ONLY_64BIT() \
12745 do \
12746 { \
12747 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12748 return IEMOP_RAISE_INVALID_OPCODE(); \
12749 } while (0)
12750
12751/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12752#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12753 do \
12754 { \
12755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12756 iemRecalEffOpSize64Default(pVCpu); \
12757 } while (0)
12758
12759/** The instruction has 64-bit operand size if 64-bit mode. */
12760#define IEMOP_HLP_64BIT_OP_SIZE() \
12761 do \
12762 { \
12763 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12764 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12765 } while (0)
12766
12767/** Only a REX prefix immediately preceding the first opcode byte takes
12768 * effect. This macro helps ensure this and logs bad guest code. */
12769#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12770 do \
12771 { \
12772 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12773 { \
12774 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12775 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12776 pVCpu->iem.s.uRexB = 0; \
12777 pVCpu->iem.s.uRexIndex = 0; \
12778 pVCpu->iem.s.uRexReg = 0; \
12779 iemRecalEffOpSize(pVCpu); \
12780 } \
12781 } while (0)
12782
12783/**
12784 * Done decoding.
12785 */
12786#define IEMOP_HLP_DONE_DECODING() \
12787 do \
12788 { \
12789 /*nothing for now, maybe later... */ \
12790 } while (0)
12791
12792/**
12793 * Done decoding, raise \#UD exception if lock prefix present.
12794 */
12795#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12796 do \
12797 { \
12798 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12799 { /* likely */ } \
12800 else \
12801 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12802 } while (0)
12803
12804
12805/**
12806 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12807 * repnz or size prefixes are present, or if in real or v8086 mode.
12808 */
12809#define IEMOP_HLP_DONE_VEX_DECODING() \
12810 do \
12811 { \
12812 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12813 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12814 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12815 { /* likely */ } \
12816 else \
12817 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12818 } while (0)
12819
12820/**
12821 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12822 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12823 */
12824#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12825 do \
12826 { \
12827 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12828 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12829 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12830 && pVCpu->iem.s.uVexLength == 0)) \
12831 { /* likely */ } \
12832 else \
12833 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12834 } while (0)
12835
12836
12837/**
12838 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12839 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12840 * register 0, or if in real or v8086 mode.
12841 */
12842#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12843 do \
12844 { \
12845 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12846 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12847 && !pVCpu->iem.s.uVex3rdReg \
12848 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12849 { /* likely */ } \
12850 else \
12851 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12852 } while (0)
12853
12854/**
12855 * Done decoding VEX, no V, L=0.
12856 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12857 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12858 */
12859#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12860 do \
12861 { \
12862 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12863 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12864 && pVCpu->iem.s.uVexLength == 0 \
12865 && pVCpu->iem.s.uVex3rdReg == 0 \
12866 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12867 { /* likely */ } \
12868 else \
12869 return IEMOP_RAISE_INVALID_OPCODE(); \
12870 } while (0)
12871
12872#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12873 do \
12874 { \
12875 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12876 { /* likely */ } \
12877 else \
12878 { \
12879 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12880 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12881 } \
12882 } while (0)
12883#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12884 do \
12885 { \
12886 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12887 { /* likely */ } \
12888 else \
12889 { \
12890 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12891 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12892 } \
12893 } while (0)
12894
12895/**
12896 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12897 * are present.
12898 */
12899#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12900 do \
12901 { \
12902 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12903 { /* likely */ } \
12904 else \
12905 return IEMOP_RAISE_INVALID_OPCODE(); \
12906 } while (0)
12907
12908/**
12909 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12910 * prefixes are present.
12911 */
12912#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12913 do \
12914 { \
12915 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12916 { /* likely */ } \
12917 else \
12918 return IEMOP_RAISE_INVALID_OPCODE(); \
12919 } while (0)
12920
12921
12922/**
12923 * Calculates the effective address of a ModR/M memory operand.
12924 *
12925 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12926 *
12927 * @return Strict VBox status code.
12928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12929 * @param bRm The ModRM byte.
12930 * @param cbImm The size of any immediate following the
12931 * effective address opcode bytes. Important for
12932 * RIP relative addressing.
12933 * @param pGCPtrEff Where to return the effective address.
12934 */
12935IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12936{
12937 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12938# define SET_SS_DEF() \
12939 do \
12940 { \
12941 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12942 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12943 } while (0)
12944
12945 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12946 {
12947/** @todo Check the effective address size crap! */
12948 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12949 {
12950 uint16_t u16EffAddr;
12951
12952 /* Handle the disp16 form with no registers first. */
12953 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12954 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12955 else
12956 {
12957 /* Get the displacement. */
12958 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12959 {
12960 case 0: u16EffAddr = 0; break;
12961 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12962 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12963 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12964 }
12965
12966 /* Add the base and index registers to the disp. */
12967 switch (bRm & X86_MODRM_RM_MASK)
12968 {
12969 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12970 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12971 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12972 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12973 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12974 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12975 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12976 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12977 }
12978 }
12979
12980 *pGCPtrEff = u16EffAddr;
12981 }
12982 else
12983 {
12984 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12985 uint32_t u32EffAddr;
12986
12987 /* Handle the disp32 form with no registers first. */
12988 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12989 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12990 else
12991 {
12992 /* Get the register (or SIB) value. */
12993 switch ((bRm & X86_MODRM_RM_MASK))
12994 {
12995 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12996 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12997 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12998 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12999 case 4: /* SIB */
13000 {
13001 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13002
13003 /* Get the index and scale it. */
13004 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13005 {
13006 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13007 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13008 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13009 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13010 case 4: u32EffAddr = 0; /*none */ break;
13011 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13012 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13013 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13014 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13015 }
13016 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13017
13018 /* add base */
13019 switch (bSib & X86_SIB_BASE_MASK)
13020 {
13021 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13022 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13023 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13024 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13025 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13026 case 5:
13027 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13028 {
13029 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13030 SET_SS_DEF();
13031 }
13032 else
13033 {
13034 uint32_t u32Disp;
13035 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13036 u32EffAddr += u32Disp;
13037 }
13038 break;
13039 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13040 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13041 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13042 }
13043 break;
13044 }
13045 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13046 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13047 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13048 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13049 }
13050
13051 /* Get and add the displacement. */
13052 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13053 {
13054 case 0:
13055 break;
13056 case 1:
13057 {
13058 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13059 u32EffAddr += i8Disp;
13060 break;
13061 }
13062 case 2:
13063 {
13064 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13065 u32EffAddr += u32Disp;
13066 break;
13067 }
13068 default:
13069 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13070 }
13071
13072 }
13073 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13074 *pGCPtrEff = u32EffAddr;
13075 else
13076 {
13077 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13078 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13079 }
13080 }
13081 }
13082 else
13083 {
13084 uint64_t u64EffAddr;
13085
13086 /* Handle the rip+disp32 form with no registers first. */
13087 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13088 {
13089 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13090 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13091 }
13092 else
13093 {
13094 /* Get the register (or SIB) value. */
13095 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13096 {
13097 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13098 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13099 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13100 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13101 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13102 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13103 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13104 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13105 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13106 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13107 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13108 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13109 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13110 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13111 /* SIB */
13112 case 4:
13113 case 12:
13114 {
13115 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13116
13117 /* Get the index and scale it. */
13118 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13119 {
13120 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13121 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13122 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13123 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13124 case 4: u64EffAddr = 0; /*none */ break;
13125 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13126 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13127 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13128 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13129 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13130 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13131 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13132 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13133 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13134 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13135 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13137 }
13138 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13139
13140 /* add base */
13141 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13142 {
13143 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13144 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13145 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13146 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13147 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13148 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13149 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13150 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13151 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13152 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13153 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13154 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13155 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13156 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13157 /* complicated encodings */
13158 case 5:
13159 case 13:
13160 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13161 {
13162 if (!pVCpu->iem.s.uRexB)
13163 {
13164 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13165 SET_SS_DEF();
13166 }
13167 else
13168 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13169 }
13170 else
13171 {
13172 uint32_t u32Disp;
13173 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13174 u64EffAddr += (int32_t)u32Disp;
13175 }
13176 break;
13177 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13178 }
13179 break;
13180 }
13181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13182 }
13183
13184 /* Get and add the displacement. */
13185 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13186 {
13187 case 0:
13188 break;
13189 case 1:
13190 {
13191 int8_t i8Disp;
13192 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13193 u64EffAddr += i8Disp;
13194 break;
13195 }
13196 case 2:
13197 {
13198 uint32_t u32Disp;
13199 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13200 u64EffAddr += (int32_t)u32Disp;
13201 break;
13202 }
13203 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13204 }
13205
13206 }
13207
13208 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13209 *pGCPtrEff = u64EffAddr;
13210 else
13211 {
13212 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13213 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13214 }
13215 }
13216
13217 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13218 return VINF_SUCCESS;
13219}
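/*
 * The switch ladders above and below open-code the x86 ModR/M + SIB effective
 * address rules for speed.  As a reading aid, here is a minimal, self-contained
 * sketch of the same 32-bit rules; it is illustrative only and not part of IEM,
 * and the EXAMPLEREGS type and exampleCalcEffAddr32 helper are invented names.
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>

/* Toy register file indexed 0..7: eax, ecx, edx, ebx, esp, ebp, esi, edi. */
typedef struct EXAMPLEREGS { uint32_t aGprs[8]; } EXAMPLEREGS;

/**
 * Computes a 32-bit effective address from the ModR/M byte, an optional SIB
 * byte and a displacement which the caller has already fetched and, for the
 * disp8 form, sign extended.
 */
static uint32_t exampleCalcEffAddr32(EXAMPLEREGS const *pRegs, uint8_t bRm, uint8_t bSib, int32_t iDisp)
{
    uint8_t const iMod = bRm >> 6;
    uint8_t const iRm  = bRm & 7;

    /* mod=00 rm=101 is a pure disp32 operand; no registers are involved. */
    if (iMod == 0 && iRm == 5)
        return (uint32_t)iDisp;

    uint32_t uEffAddr;
    if (iRm != 4)                           /* plain base register */
        uEffAddr = pRegs->aGprs[iRm];
    else                                    /* rm=100 selects the SIB byte */
    {
        uint8_t const iIndex = (bSib >> 3) & 7;
        uint8_t const iBase  = bSib & 7;
        uEffAddr = iIndex != 4 ? pRegs->aGprs[iIndex] << (bSib >> 6) : 0; /* index=100 means no index */
        if (iBase != 5 || iMod != 0)        /* base=101 with mod=00 means disp32 only */
            uEffAddr += pRegs->aGprs[iBase];
    }
    return uEffAddr + (uint32_t)iDisp;
}
#endif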
13220
13221
13222/**
13223 * Calculates the effective address of a ModR/M memory operand.
13224 *
13225 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13226 *
13227 * @return Strict VBox status code.
13228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13229 * @param bRm The ModRM byte.
13230 * @param cbImm The size of any immediate following the
13231 * effective address opcode bytes. Important for
13232 * RIP relative addressing.
13233 * @param pGCPtrEff Where to return the effective address.
13234 * @param offRsp RSP displacement.
13235 */
13236IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13237{
13238 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13239# define SET_SS_DEF() \
13240 do \
13241 { \
13242 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13243 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13244 } while (0)
13245
13246 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13247 {
13248/** @todo Check the effective address size crap! */
13249 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13250 {
13251 uint16_t u16EffAddr;
13252
13253 /* Handle the disp16 form with no registers first. */
13254 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13255 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13256 else
13257 {
13258 /* Get the displacement. */
13259 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13260 {
13261 case 0: u16EffAddr = 0; break;
13262 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13263 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13264 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13265 }
13266
13267 /* Add the base and index registers to the disp. */
13268 switch (bRm & X86_MODRM_RM_MASK)
13269 {
13270 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13271 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13272 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13273 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13274 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13275 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13276 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13277 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13278 }
13279 }
13280
13281 *pGCPtrEff = u16EffAddr;
13282 }
13283 else
13284 {
13285 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13286 uint32_t u32EffAddr;
13287
13288 /* Handle the disp32 form with no registers first. */
13289 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13290 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13291 else
13292 {
13293 /* Get the register (or SIB) value. */
13294 switch ((bRm & X86_MODRM_RM_MASK))
13295 {
13296 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13297 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13298 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13299 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13300 case 4: /* SIB */
13301 {
13302 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13303
13304 /* Get the index and scale it. */
13305 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13306 {
13307 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13308 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13309 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13310 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13311 case 4: u32EffAddr = 0; /*none */ break;
13312 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13313 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13314 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13315 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13316 }
13317 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13318
13319 /* add base */
13320 switch (bSib & X86_SIB_BASE_MASK)
13321 {
13322 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13323 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13324 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13325 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13326 case 4:
13327 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13328 SET_SS_DEF();
13329 break;
13330 case 5:
13331 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13332 {
13333 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13334 SET_SS_DEF();
13335 }
13336 else
13337 {
13338 uint32_t u32Disp;
13339 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13340 u32EffAddr += u32Disp;
13341 }
13342 break;
13343 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13344 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13346 }
13347 break;
13348 }
13349 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13350 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13351 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13352 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13353 }
13354
13355 /* Get and add the displacement. */
13356 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13357 {
13358 case 0:
13359 break;
13360 case 1:
13361 {
13362 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13363 u32EffAddr += i8Disp;
13364 break;
13365 }
13366 case 2:
13367 {
13368 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13369 u32EffAddr += u32Disp;
13370 break;
13371 }
13372 default:
13373 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13374 }
13375
13376 }
13377 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13378 *pGCPtrEff = u32EffAddr;
13379 else
13380 {
13381 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13382 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13383 }
13384 }
13385 }
13386 else
13387 {
13388 uint64_t u64EffAddr;
13389
13390 /* Handle the rip+disp32 form with no registers first. */
13391 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13392 {
13393 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13394 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13395 }
13396 else
13397 {
13398 /* Get the register (or SIB) value. */
13399 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13400 {
13401 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13402 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13403 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13404 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13405 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13406 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13407 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13408 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13409 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13410 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13411 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13412 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13413 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13414 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13415 /* SIB */
13416 case 4:
13417 case 12:
13418 {
13419 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13420
13421 /* Get the index and scale it. */
13422 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13423 {
13424 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13425 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13426 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13427 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13428 case 4: u64EffAddr = 0; /*none */ break;
13429 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13430 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13431 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13432 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13433 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13434 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13435 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13436 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13437 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13438 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13439 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13440 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13441 }
13442 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13443
13444 /* add base */
13445 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13446 {
13447 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13448 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13449 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13450 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13451 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13452 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13453 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13454 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13455 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13456 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13457 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13458 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13459 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13460 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13461 /* complicated encodings */
13462 case 5:
13463 case 13:
13464 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13465 {
13466 if (!pVCpu->iem.s.uRexB)
13467 {
13468 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13469 SET_SS_DEF();
13470 }
13471 else
13472 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13473 }
13474 else
13475 {
13476 uint32_t u32Disp;
13477 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13478 u64EffAddr += (int32_t)u32Disp;
13479 }
13480 break;
13481 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13482 }
13483 break;
13484 }
13485 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13486 }
13487
13488 /* Get and add the displacement. */
13489 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13490 {
13491 case 0:
13492 break;
13493 case 1:
13494 {
13495 int8_t i8Disp;
13496 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13497 u64EffAddr += i8Disp;
13498 break;
13499 }
13500 case 2:
13501 {
13502 uint32_t u32Disp;
13503 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13504 u64EffAddr += (int32_t)u32Disp;
13505 break;
13506 }
13507 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13508 }
13509
13510 }
13511
13512 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13513 *pGCPtrEff = u64EffAddr;
13514 else
13515 {
13516 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13517 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13518 }
13519 }
13520
13521 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13522 return VINF_SUCCESS;
13523}
13524
13525
13526#ifdef IEM_WITH_SETJMP
13527/**
13528 * Calculates the effective address of a ModR/M memory operand.
13529 *
13530 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13531 *
13532 * May longjmp on internal error.
13533 *
13534 * @return The effective address.
13535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13536 * @param bRm The ModRM byte.
13537 * @param cbImm The size of any immediate following the
13538 * effective address opcode bytes. Important for
13539 * RIP relative addressing.
13540 */
13541IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13542{
13543 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13544# define SET_SS_DEF() \
13545 do \
13546 { \
13547 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13548 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13549 } while (0)
13550
13551 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13552 {
13553/** @todo Check the effective address size crap! */
13554 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13555 {
13556 uint16_t u16EffAddr;
13557
13558 /* Handle the disp16 form with no registers first. */
13559 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13560 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13561 else
13562 {
13563 /* Get the displacement. */
13564 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13565 {
13566 case 0: u16EffAddr = 0; break;
13567 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13568 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13569 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13570 }
13571
13572 /* Add the base and index registers to the disp. */
13573 switch (bRm & X86_MODRM_RM_MASK)
13574 {
13575 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13576 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13577 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13578 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13579 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13580 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13581 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13582 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13583 }
13584 }
13585
13586 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13587 return u16EffAddr;
13588 }
13589
13590 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13591 uint32_t u32EffAddr;
13592
13593 /* Handle the disp32 form with no registers first. */
13594 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13595 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13596 else
13597 {
13598 /* Get the register (or SIB) value. */
13599 switch ((bRm & X86_MODRM_RM_MASK))
13600 {
13601 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13602 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13603 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13604 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13605 case 4: /* SIB */
13606 {
13607 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13608
13609 /* Get the index and scale it. */
13610 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13611 {
13612 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13613 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13614 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13615 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13616 case 4: u32EffAddr = 0; /*none */ break;
13617 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13618 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13619 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13620 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13621 }
13622 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13623
13624 /* add base */
13625 switch (bSib & X86_SIB_BASE_MASK)
13626 {
13627 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13628 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13629 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13630 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13631 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13632 case 5:
13633 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13634 {
13635 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13636 SET_SS_DEF();
13637 }
13638 else
13639 {
13640 uint32_t u32Disp;
13641 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13642 u32EffAddr += u32Disp;
13643 }
13644 break;
13645 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13646 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13647 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13648 }
13649 break;
13650 }
13651 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13652 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13653 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13654 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13655 }
13656
13657 /* Get and add the displacement. */
13658 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13659 {
13660 case 0:
13661 break;
13662 case 1:
13663 {
13664 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13665 u32EffAddr += i8Disp;
13666 break;
13667 }
13668 case 2:
13669 {
13670 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13671 u32EffAddr += u32Disp;
13672 break;
13673 }
13674 default:
13675 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13676 }
13677 }
13678
13679 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13680 {
13681 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13682 return u32EffAddr;
13683 }
13684 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13685 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13686 return u32EffAddr & UINT16_MAX;
13687 }
13688
13689 uint64_t u64EffAddr;
13690
13691 /* Handle the rip+disp32 form with no registers first. */
13692 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13693 {
13694 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13695 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13696 }
13697 else
13698 {
13699 /* Get the register (or SIB) value. */
13700 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13701 {
13702 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13703 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13704 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13705 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13706 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13707 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13708 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13709 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13710 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13711 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13712 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13713 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13714 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13715 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13716 /* SIB */
13717 case 4:
13718 case 12:
13719 {
13720 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13721
13722 /* Get the index and scale it. */
13723 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13724 {
13725 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13726 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13727 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13728 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13729 case 4: u64EffAddr = 0; /*none */ break;
13730 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13731 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13732 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13733 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13734 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13735 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13736 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13737 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13738 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13739 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13740 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13741 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13742 }
13743 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13744
13745 /* add base */
13746 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13747 {
13748 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13749 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13750 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13751 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13752 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13753 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13754 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13755 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13756 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13757 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13758 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13759 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13760 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13761 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13762 /* complicated encodings */
13763 case 5:
13764 case 13:
13765 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13766 {
13767 if (!pVCpu->iem.s.uRexB)
13768 {
13769 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13770 SET_SS_DEF();
13771 }
13772 else
13773 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13774 }
13775 else
13776 {
13777 uint32_t u32Disp;
13778 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13779 u64EffAddr += (int32_t)u32Disp;
13780 }
13781 break;
13782 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13783 }
13784 break;
13785 }
13786 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13787 }
13788
13789 /* Get and add the displacement. */
13790 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13791 {
13792 case 0:
13793 break;
13794 case 1:
13795 {
13796 int8_t i8Disp;
13797 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13798 u64EffAddr += i8Disp;
13799 break;
13800 }
13801 case 2:
13802 {
13803 uint32_t u32Disp;
13804 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13805 u64EffAddr += (int32_t)u32Disp;
13806 break;
13807 }
13808 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13809 }
13810
13811 }
13812
13813 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13814 {
13815 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13816 return u64EffAddr;
13817 }
13818 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13819 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13820 return u64EffAddr & UINT32_MAX;
13821}
13822#endif /* IEM_WITH_SETJMP */
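/*
 * Unlike the strict-status variants above, iemOpHlpCalcRmEffAddrJmp reports
 * internal errors by longjmp'ing on the jump buffer armed by its caller (see
 * iemExecOneInner below).  The fragment below is a minimal sketch of that
 * convention in plain C; it is illustrative only, not part of IEM, and the
 * exampleDecodeJmp/exampleRunOne names and g_pJmpBuf variable are invented.
 */
#if 0 /* illustrative sketch, not built */
# include <setjmp.h>
# include <stdint.h>

static jmp_buf *g_pJmpBuf;  /* stands in for pVCpu->iem.s.CTX_SUFF(pJmpBuf) */

/* A helper without a status return reports failure by longjmp'ing a status code. */
static uint32_t exampleDecodeJmp(uint8_t bRm)
{
    if ((bRm & 0xc0) == 0xc0)               /* register form: nothing to calculate */
        longjmp(*g_pJmpBuf, -1 /* some negative VERR_* style status */);
    return bRm & 0x07;                      /* otherwise return the decoded value */
}

/* The caller arms the jump buffer and turns a longjmp back into a status code. */
static int exampleRunOne(uint8_t bRm, uint32_t *puValue)
{
    jmp_buf  JmpBuf;
    jmp_buf *pSaved = g_pJmpBuf;
    g_pJmpBuf = &JmpBuf;
    int rc;
    if ((rc = setjmp(JmpBuf)) == 0)         /* same arming pattern as iemExecOneInner */
        *puValue = exampleDecodeJmp(bRm);   /* rc stays 0 on the normal path */
    /* else: exampleDecodeJmp longjmp'ed and rc now holds its status code. */
    g_pJmpBuf = pSaved;                     /* always restore the previous buffer */
    return rc;
}
#endif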
13823
13824/** @} */
13825
13826
13827
13828/*
13829 * Include the instructions
13830 */
13831#include "IEMAllInstructions.cpp.h"
13832
13833
13834
13835#ifdef LOG_ENABLED
13836/**
13837 * Logs the current instruction.
13838 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13839 * @param fSameCtx Set if we have the same context information as the VMM,
13840 * clear if we may have already executed an instruction in
13841 * our debug context. When clear, we assume IEMCPU holds
13842 * valid CPU mode info.
13843 *
13844 * The @a fSameCtx parameter is now misleading and obsolete.
13845 * @param pszFunction The IEM function doing the execution.
13846 */
13847IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13848{
13849# ifdef IN_RING3
13850 if (LogIs2Enabled())
13851 {
13852 char szInstr[256];
13853 uint32_t cbInstr = 0;
13854 if (fSameCtx)
13855 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13856 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13857 szInstr, sizeof(szInstr), &cbInstr);
13858 else
13859 {
13860 uint32_t fFlags = 0;
13861 switch (pVCpu->iem.s.enmCpuMode)
13862 {
13863 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13864 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13865 case IEMMODE_16BIT:
13866 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13867 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13868 else
13869 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13870 break;
13871 }
13872 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13873 szInstr, sizeof(szInstr), &cbInstr);
13874 }
13875
13876 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13877 Log2(("**** %s\n"
13878 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13879 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13880 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13881 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13882 " %s\n"
13883 , pszFunction,
13884 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13885 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13886 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13887 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13888 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13889 szInstr));
13890
13891 if (LogIs3Enabled())
13892 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13893 }
13894 else
13895# endif
13896 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13897 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13898 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13899}
13900#endif /* LOG_ENABLED */
13901
13902
13903/**
13904 * Makes status code adjustments (pass up from I/O and access handler)
13905 * as well as maintaining statistics.
13906 *
13907 * @returns Strict VBox status code to pass up.
13908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13909 * @param rcStrict The status from executing an instruction.
13910 */
13911DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13912{
13913 if (rcStrict != VINF_SUCCESS)
13914 {
13915 if (RT_SUCCESS(rcStrict))
13916 {
13917 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13918 || rcStrict == VINF_IOM_R3_IOPORT_READ
13919 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13920 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13921 || rcStrict == VINF_IOM_R3_MMIO_READ
13922 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13923 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13924 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13925 || rcStrict == VINF_CPUM_R3_MSR_READ
13926 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13927 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13928 || rcStrict == VINF_EM_RAW_TO_R3
13929 || rcStrict == VINF_EM_TRIPLE_FAULT
13930 || rcStrict == VINF_GIM_R3_HYPERCALL
13931 /* raw-mode / virt handlers only: */
13932 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13933 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13934 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13935 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13936 || rcStrict == VINF_SELM_SYNC_GDT
13937 || rcStrict == VINF_CSAM_PENDING_ACTION
13938 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13939 /* nested hw.virt codes: */
13940 || rcStrict == VINF_VMX_VMEXIT
13941 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13942 || rcStrict == VINF_SVM_VMEXIT
13943 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13944/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13945 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13946#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13947 if ( rcStrict == VINF_VMX_VMEXIT
13948 && rcPassUp == VINF_SUCCESS)
13949 rcStrict = VINF_SUCCESS;
13950 else
13951#endif
13952#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13953 if ( rcStrict == VINF_SVM_VMEXIT
13954 && rcPassUp == VINF_SUCCESS)
13955 rcStrict = VINF_SUCCESS;
13956 else
13957#endif
13958 if (rcPassUp == VINF_SUCCESS)
13959 pVCpu->iem.s.cRetInfStatuses++;
13960 else if ( rcPassUp < VINF_EM_FIRST
13961 || rcPassUp > VINF_EM_LAST
13962 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13963 {
13964 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13965 pVCpu->iem.s.cRetPassUpStatus++;
13966 rcStrict = rcPassUp;
13967 }
13968 else
13969 {
13970 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13971 pVCpu->iem.s.cRetInfStatuses++;
13972 }
13973 }
13974 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13975 pVCpu->iem.s.cRetAspectNotImplemented++;
13976 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13977 pVCpu->iem.s.cRetInstrNotImplemented++;
13978 else
13979 pVCpu->iem.s.cRetErrStatuses++;
13980 }
13981 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13982 {
13983 pVCpu->iem.s.cRetPassUpStatus++;
13984 rcStrict = pVCpu->iem.s.rcPassUp;
13985 }
13986
13987 return rcStrict;
13988}
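/*
 * As a reading aid for the merging rule above: a pending pass-up status only
 * replaces an informational status when it is not an EM scheduling status at
 * all, or when it is a more urgent (numerically lower) EM status.  Below is a
 * minimal sketch of that rule with plain integers; it is illustrative only,
 * not part of IEM, and the EXAMPLE_* constants and exampleMergeStatus name are
 * invented (statistics updates and the assertion are omitted).
 */
#if 0 /* illustrative sketch, not built */
# include <stdbool.h>

# define EXAMPLE_VINF_SUCCESS   0
# define EXAMPLE_VINF_EM_FIRST  1100    /* stand-ins for VINF_EM_FIRST/VINF_EM_LAST */
# define EXAMPLE_VINF_EM_LAST   1199

/* Returns the status that should be handed back to the caller. */
static int exampleMergeStatus(int rcStrict, int rcPassUp)
{
    if (rcStrict == EXAMPLE_VINF_SUCCESS)
        return rcPassUp;                    /* nothing new, any pending status wins */
    if (rcStrict < 0)
        return rcStrict;                    /* hard errors are never overridden */
    if (rcPassUp == EXAMPLE_VINF_SUCCESS)
        return rcStrict;                    /* nothing pending, keep the new status */

    bool const fPassUpWins = rcPassUp < EXAMPLE_VINF_EM_FIRST
                          || rcPassUp > EXAMPLE_VINF_EM_LAST
                          || rcPassUp < rcStrict;
    return fPassUpWins ? rcPassUp : rcStrict;
}
#endif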
13989
13990
13991/**
13992 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13993 * IEMExecOneWithPrefetchedByPC.
13994 *
13995 * Similar code is found in IEMExecLots.
13996 *
13997 * @return Strict VBox status code.
13998 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13999 * @param fExecuteInhibit If set, execute the instruction following CLI,
14000 * POP SS and MOV SS,GR.
14001 * @param pszFunction The calling function name.
14002 */
14003DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14004{
14005 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14006 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14007 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14008 RT_NOREF_PV(pszFunction);
14009
14010#ifdef IEM_WITH_SETJMP
14011 VBOXSTRICTRC rcStrict;
14012 jmp_buf JmpBuf;
14013 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14014 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14015 if ((rcStrict = setjmp(JmpBuf)) == 0)
14016 {
14017 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14018 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14019 }
14020 else
14021 pVCpu->iem.s.cLongJumps++;
14022 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14023#else
14024 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14025 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14026#endif
14027 if (rcStrict == VINF_SUCCESS)
14028 pVCpu->iem.s.cInstructions++;
14029 if (pVCpu->iem.s.cActiveMappings > 0)
14030 {
14031 Assert(rcStrict != VINF_SUCCESS);
14032 iemMemRollback(pVCpu);
14033 }
14034 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14035 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14036 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14037
14038//#ifdef DEBUG
14039// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14040//#endif
14041
14042#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14043 /*
14044 * Perform any VMX nested-guest instruction boundary actions.
14045 *
14046 * If any of these causes a VM-exit, we must skip executing the next
14047 * instruction (would run into stale page tables). A VM-exit makes sure
14048 * there is no interrupt-inhibition, so that should ensure we don't go on
14049 * to execute the next instruction. Clearing fExecuteInhibit is
14050 * problematic because of the setjmp/longjmp clobbering above.
14051 */
14052 if ( rcStrict == VINF_SUCCESS
14053 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14054 {
14055 /* TPR-below threshold/APIC write has the highest priority. */
14056 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14057 {
14058 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14059 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14060 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14061 }
14062 /* MTF takes priority over VMX-preemption timer. */
14063 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14064 {
14065 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF);
14066 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14067 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14068 }
14069 /* VMX preemption timer takes priority over NMI-window exits. */
14070 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14071 {
14072 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14073 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14074 rcStrict = VINF_SUCCESS;
14075 else
14076 {
14077 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14078 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14079 }
14080 }
14081 /* NMI-window VM-exit. */
14082 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))
14083 {
14084 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW);
14085 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14086 }
14087 }
14088#endif
14089
14090 /* Execute the next instruction as well if a cli, pop ss or
14091 mov ss, Gr has just completed successfully. */
14092 if ( fExecuteInhibit
14093 && rcStrict == VINF_SUCCESS
14094 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14095 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14096 {
14097 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14098 if (rcStrict == VINF_SUCCESS)
14099 {
14100#ifdef LOG_ENABLED
14101 iemLogCurInstr(pVCpu, false, pszFunction);
14102#endif
14103#ifdef IEM_WITH_SETJMP
14104 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14105 if ((rcStrict = setjmp(JmpBuf)) == 0)
14106 {
14107 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14108 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14109 }
14110 else
14111 pVCpu->iem.s.cLongJumps++;
14112 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14113#else
14114 IEM_OPCODE_GET_NEXT_U8(&b);
14115 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14116#endif
14117 if (rcStrict == VINF_SUCCESS)
14118 pVCpu->iem.s.cInstructions++;
14119 if (pVCpu->iem.s.cActiveMappings > 0)
14120 {
14121 Assert(rcStrict != VINF_SUCCESS);
14122 iemMemRollback(pVCpu);
14123 }
14124 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14125 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14126 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14127 }
14128 else if (pVCpu->iem.s.cActiveMappings > 0)
14129 iemMemRollback(pVCpu);
14130 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14131 }
14132
14133 /*
14134 * Return value fiddling, statistics and sanity assertions.
14135 */
14136 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14137
14138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14140 return rcStrict;
14141}
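/*
 * The fExecuteInhibit handling above implements the one-instruction interrupt
 * shadow that follows STI, POP SS and MOV SS: the very next instruction is
 * executed before interrupts may be delivered.  Below is a minimal sketch of
 * that shape; it is illustrative only, not part of IEM, and the EXAMPLECPU
 * type and exampleStep/exampleExecOne names are invented (exampleStep merely
 * stands in for decode + execute).
 */
#if 0 /* illustrative sketch, not built */
# include <stdbool.h>
# include <stdint.h>

typedef struct EXAMPLECPU
{
    uint64_t uRip;              /* current instruction pointer */
    bool     fInhibitShadow;    /* set by the STI / MOV SS / POP SS emulation */
    uint64_t uInhibitRip;       /* RIP immediately after the inhibiting instruction */
} EXAMPLECPU;

/* Stand-in for decoding and executing a single instruction. */
static int exampleStep(EXAMPLECPU *pCpu)
{
    pCpu->uRip += 1;            /* pretend every instruction is one byte long */
    return 0;
}

/* Executes one instruction, plus the shadowed follow-up instruction if needed. */
static int exampleExecOne(EXAMPLECPU *pCpu)
{
    int rc = exampleStep(pCpu);
    if (   rc == 0
        && pCpu->fInhibitShadow
        && pCpu->uInhibitRip == pCpu->uRip) /* shadow belongs to the instruction just run */
    {
        rc = exampleStep(pCpu);
        pCpu->fInhibitShadow = false;       /* the shadow only ever covers one instruction */
    }
    return rc;
}
#endif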
14142
14143
14144#ifdef IN_RC
14145/**
14146 * Re-enters raw-mode or ensure we return to ring-3.
14147 *
14148 * @returns rcStrict, maybe modified.
14149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14150 * @param rcStrict The status code returned by the interpreter.
14151 */
14152DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14153{
14154 if ( !pVCpu->iem.s.fInPatchCode
14155 && ( rcStrict == VINF_SUCCESS
14156 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14157 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14158 {
14159 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14160 CPUMRawEnter(pVCpu);
14161 else
14162 {
14163 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14164 rcStrict = VINF_EM_RESCHEDULE;
14165 }
14166 }
14167 return rcStrict;
14168}
14169#endif
14170
14171
14172/**
14173 * Execute one instruction.
14174 *
14175 * @return Strict VBox status code.
14176 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14177 */
14178VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14179{
14180#ifdef LOG_ENABLED
14181 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14182#endif
14183
14184 /*
14185 * Do the decoding and emulation.
14186 */
14187 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14188 if (rcStrict == VINF_SUCCESS)
14189 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14190 else if (pVCpu->iem.s.cActiveMappings > 0)
14191 iemMemRollback(pVCpu);
14192
14193#ifdef IN_RC
14194 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14195#endif
14196 if (rcStrict != VINF_SUCCESS)
14197 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14198 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14199 return rcStrict;
14200}
14201
14202
14203VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14204{
14205 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14206
14207 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14208 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14209 if (rcStrict == VINF_SUCCESS)
14210 {
14211 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14212 if (pcbWritten)
14213 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14214 }
14215 else if (pVCpu->iem.s.cActiveMappings > 0)
14216 iemMemRollback(pVCpu);
14217
14218#ifdef IN_RC
14219 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14220#endif
14221 return rcStrict;
14222}
14223
14224
14225VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14226 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14227{
14228 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14229
14230 VBOXSTRICTRC rcStrict;
14231 if ( cbOpcodeBytes
14232 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14233 {
14234 iemInitDecoder(pVCpu, false);
14235#ifdef IEM_WITH_CODE_TLB
14236 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14237 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14238 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14239 pVCpu->iem.s.offCurInstrStart = 0;
14240 pVCpu->iem.s.offInstrNextByte = 0;
14241#else
14242 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14243 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14244#endif
14245 rcStrict = VINF_SUCCESS;
14246 }
14247 else
14248 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14249 if (rcStrict == VINF_SUCCESS)
14250 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14251 else if (pVCpu->iem.s.cActiveMappings > 0)
14252 iemMemRollback(pVCpu);
14253
14254#ifdef IN_RC
14255 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14256#endif
14257 return rcStrict;
14258}
14259
14260
14261VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14262{
14263 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14264
14265 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14266 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14267 if (rcStrict == VINF_SUCCESS)
14268 {
14269 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14270 if (pcbWritten)
14271 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14272 }
14273 else if (pVCpu->iem.s.cActiveMappings > 0)
14274 iemMemRollback(pVCpu);
14275
14276#ifdef IN_RC
14277 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14278#endif
14279 return rcStrict;
14280}
14281
14282
14283VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14284 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14285{
14286 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14287
14288 VBOXSTRICTRC rcStrict;
14289 if ( cbOpcodeBytes
14290 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14291 {
14292 iemInitDecoder(pVCpu, true);
14293#ifdef IEM_WITH_CODE_TLB
14294 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14295 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14296 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14297 pVCpu->iem.s.offCurInstrStart = 0;
14298 pVCpu->iem.s.offInstrNextByte = 0;
14299#else
14300 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14301 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14302#endif
14303 rcStrict = VINF_SUCCESS;
14304 }
14305 else
14306 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14307 if (rcStrict == VINF_SUCCESS)
14308 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14309 else if (pVCpu->iem.s.cActiveMappings > 0)
14310 iemMemRollback(pVCpu);
14311
14312#ifdef IN_RC
14313 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14314#endif
14315 return rcStrict;
14316}
14317
14318
14319/**
14320 * For debugging DISGetParamSize, may come in handy.
14321 *
14322 * @returns Strict VBox status code.
14323 * @param pVCpu The cross context virtual CPU structure of the
14324 * calling EMT.
14325 * @param pCtxCore The context core structure.
14326 * @param OpcodeBytesPC The PC of the opcode bytes.
14327 * @param pvOpcodeBytes Prefetched opcode bytes.
14328 * @param cbOpcodeBytes Number of prefetched bytes.
14329 * @param pcbWritten Where to return the number of bytes written.
14330 * Optional.
14331 */
14332VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14333 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14334 uint32_t *pcbWritten)
14335{
14336 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14337
14338 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14339 VBOXSTRICTRC rcStrict;
14340 if ( cbOpcodeBytes
14341 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14342 {
14343 iemInitDecoder(pVCpu, true);
14344#ifdef IEM_WITH_CODE_TLB
14345 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14346 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14347 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14348 pVCpu->iem.s.offCurInstrStart = 0;
14349 pVCpu->iem.s.offInstrNextByte = 0;
14350#else
14351 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14352 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14353#endif
14354 rcStrict = VINF_SUCCESS;
14355 }
14356 else
14357 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14358 if (rcStrict == VINF_SUCCESS)
14359 {
14360 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14361 if (pcbWritten)
14362 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14363 }
14364 else if (pVCpu->iem.s.cActiveMappings > 0)
14365 iemMemRollback(pVCpu);
14366
14367#ifdef IN_RC
14368 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14369#endif
14370 return rcStrict;
14371}
14372
14373
14374VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14375{
14376 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14377 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14378
14379 /*
14380 * See if there is an interrupt pending in TRPM, inject it if we can.
14381 */
14382 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14383#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14384 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14385 if (fIntrEnabled)
14386 {
14387 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14388 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14389 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14390 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14391 else
14392 {
14393 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14394 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14395 }
14396 }
14397#else
14398 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14399#endif
14400 if ( fIntrEnabled
14401 && TRPMHasTrap(pVCpu)
14402 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14403 {
14404 uint8_t u8TrapNo;
14405 TRPMEVENT enmType;
14406 RTGCUINT uErrCode;
14407 RTGCPTR uCr2;
14408 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14409 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14410 TRPMResetTrap(pVCpu);
14411#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14412 /* Injecting an event may cause a VM-exit. */
14413 if ( rcStrict != VINF_SUCCESS
14414 && rcStrict != VINF_IEM_RAISED_XCPT)
14415 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14416#else
14417 NOREF(rcStrict);
14418#endif
14419 }
14420
14421 /*
14422 * Initial decoder init w/ prefetch, then setup setjmp.
14423 */
14424 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14425 if (rcStrict == VINF_SUCCESS)
14426 {
14427#ifdef IEM_WITH_SETJMP
14428 jmp_buf JmpBuf;
14429 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14430 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14431 pVCpu->iem.s.cActiveMappings = 0;
14432 if ((rcStrict = setjmp(JmpBuf)) == 0)
14433#endif
14434 {
14435 /*
14436 * The run loop. The instruction limit is supplied by the caller (cMaxInstructions).
14437 */
14438 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14439 PVM pVM = pVCpu->CTX_SUFF(pVM);
14440 for (;;)
14441 {
14442 /*
14443 * Log the state.
14444 */
14445#ifdef LOG_ENABLED
14446 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14447#endif
14448
14449 /*
14450 * Do the decoding and emulation.
14451 */
14452 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14453 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14454 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14455 {
14456 Assert(pVCpu->iem.s.cActiveMappings == 0);
14457 pVCpu->iem.s.cInstructions++;
14458 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14459 {
14460 uint64_t fCpu = pVCpu->fLocalForcedActions
14461 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14462 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14463 | VMCPU_FF_TLB_FLUSH
14464#ifdef VBOX_WITH_RAW_MODE
14465 | VMCPU_FF_TRPM_SYNC_IDT
14466 | VMCPU_FF_SELM_SYNC_TSS
14467 | VMCPU_FF_SELM_SYNC_GDT
14468 | VMCPU_FF_SELM_SYNC_LDT
14469#endif
14470 | VMCPU_FF_INHIBIT_INTERRUPTS
14471 | VMCPU_FF_BLOCK_NMIS
14472 | VMCPU_FF_UNHALT ));
14473
14474 if (RT_LIKELY( ( !fCpu
14475 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14476 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14477 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14478 {
14479 if (cMaxInstructionsGccStupidity-- > 0)
14480 {
14481 /* Poll timers every now and then according to the caller's specs. */
14482 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14483 || !TMTimerPollBool(pVM, pVCpu))
14484 {
14485 Assert(pVCpu->iem.s.cActiveMappings == 0);
14486 iemReInitDecoder(pVCpu);
14487 continue;
14488 }
14489 }
14490 }
14491 }
14492 Assert(pVCpu->iem.s.cActiveMappings == 0);
14493 }
14494 else if (pVCpu->iem.s.cActiveMappings > 0)
14495 iemMemRollback(pVCpu);
14496 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14497 break;
14498 }
14499 }
14500#ifdef IEM_WITH_SETJMP
14501 else
14502 {
14503 if (pVCpu->iem.s.cActiveMappings > 0)
14504 iemMemRollback(pVCpu);
14505# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14506 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14507# endif
14508 pVCpu->iem.s.cLongJumps++;
14509 }
14510 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14511#endif
14512
14513 /*
14514 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14515 */
14516 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14517 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14518 }
14519 else
14520 {
14521 if (pVCpu->iem.s.cActiveMappings > 0)
14522 iemMemRollback(pVCpu);
14523
14524#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14525 /*
14526 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14527 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14528 */
14529 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14530#endif
14531 }
14532
14533 /*
14534 * Maybe re-enter raw-mode and log.
14535 */
14536#ifdef IN_RC
14537 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14538#endif
14539 if (rcStrict != VINF_SUCCESS)
14540 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14541 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14542 if (pcInstructions)
14543 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14544 return rcStrict;
14545}
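/*
 * The timer polling in the loop above relies on cPollRate being a power of two
 * minus one (see the assertion at the top of IEMExecLots): ANDing the counter
 * with the mask selects roughly every (cPollRate + 1)-th iteration for the
 * comparatively expensive TMTimerPollBool call.  Below is a minimal sketch of
 * that trick; it is illustrative only, not part of IEM, and exampleRunLoop /
 * examplePollTimers are invented names (examplePollTimers stands in for
 * TMTimerPollBool).
 */
#if 0 /* illustrative sketch, not built */
# include <stdbool.h>
# include <stdint.h>

static bool examplePollTimers(void)         /* returns true when timer work is pending */
{
    return false;
}

static uint32_t exampleRunLoop(uint32_t cMaxInstructions, uint32_t cPollRate)
{
    uint32_t cExecuted = 0;
    for (uint32_t cLeft = cMaxInstructions; cLeft > 0; cLeft--)
    {
        cExecuted++;                        /* ... decode and execute one instruction ... */

        /* Only every (cPollRate + 1)-th iteration reaches the poll; the other
           iterations get away with the cheap AND test. */
        if (   (cLeft & cPollRate) == 0
            && examplePollTimers())
            break;                          /* pending timer work, leave the loop */
    }
    return cExecuted;
}
#endif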
14546
14547
14548/**
14549 * Interface used by EMExecuteExec, does exit statistics and limits.
14550 *
14551 * @returns Strict VBox status code.
14552 * @param pVCpu The cross context virtual CPU structure.
14553 * @param fWillExit To be defined.
14554 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14555 * @param cMaxInstructions Maximum number of instructions to execute.
14556 * @param cMaxInstructionsWithoutExits
14557 * The max number of instructions without exits.
14558 * @param pStats Where to return statistics.
14559 */
14560VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14561 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14562{
14563 NOREF(fWillExit); /** @todo define flexible exit crits */
14564
14565 /*
14566 * Initialize return stats.
14567 */
14568 pStats->cInstructions = 0;
14569 pStats->cExits = 0;
14570 pStats->cMaxExitDistance = 0;
14571 pStats->cReserved = 0;
14572
14573 /*
14574 * Initial decoder init w/ prefetch, then setup setjmp.
14575 */
14576 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14577 if (rcStrict == VINF_SUCCESS)
14578 {
14579#ifdef IEM_WITH_SETJMP
14580 jmp_buf JmpBuf;
14581 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14582 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14583 pVCpu->iem.s.cActiveMappings = 0;
14584 if ((rcStrict = setjmp(JmpBuf)) == 0)
14585#endif
14586 {
14587#ifdef IN_RING0
14588 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14589#endif
14590 uint32_t cInstructionSinceLastExit = 0;
14591
14592 /*
14593 * The run loop. The instruction limit is supplied by the caller (cMaxInstructions).
14594 */
14595 PVM pVM = pVCpu->CTX_SUFF(pVM);
14596 for (;;)
14597 {
14598 /*
14599 * Log the state.
14600 */
14601#ifdef LOG_ENABLED
14602 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14603#endif
14604
14605 /*
14606 * Do the decoding and emulation.
14607 */
14608 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14609
14610 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14611 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14612
14613 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14614 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14615 {
14616 pStats->cExits += 1;
14617 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14618 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14619 cInstructionSinceLastExit = 0;
14620 }
14621
14622 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14623 {
14624 Assert(pVCpu->iem.s.cActiveMappings == 0);
14625 pVCpu->iem.s.cInstructions++;
14626 pStats->cInstructions++;
14627 cInstructionSinceLastExit++;
14628 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14629 {
14630 uint64_t fCpu = pVCpu->fLocalForcedActions
14631 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14632 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14633 | VMCPU_FF_TLB_FLUSH
14634#ifdef VBOX_WITH_RAW_MODE
14635 | VMCPU_FF_TRPM_SYNC_IDT
14636 | VMCPU_FF_SELM_SYNC_TSS
14637 | VMCPU_FF_SELM_SYNC_GDT
14638 | VMCPU_FF_SELM_SYNC_LDT
14639#endif
14640 | VMCPU_FF_INHIBIT_INTERRUPTS
14641 | VMCPU_FF_BLOCK_NMIS
14642 | VMCPU_FF_UNHALT ));
14643
14644 if (RT_LIKELY( ( ( !fCpu
14645 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14646 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14647 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14648 || pStats->cInstructions < cMinInstructions))
14649 {
14650 if (pStats->cInstructions < cMaxInstructions)
14651 {
14652 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14653 {
14654#ifdef IN_RING0
14655 if ( !fCheckPreemptionPending
14656 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14657#endif
14658 {
14659 Assert(pVCpu->iem.s.cActiveMappings == 0);
14660 iemReInitDecoder(pVCpu);
14661 continue;
14662 }
14663#ifdef IN_RING0
14664 rcStrict = VINF_EM_RAW_INTERRUPT;
14665 break;
14666#endif
14667 }
14668 }
14669 }
14670 Assert(!(fCpu & VMCPU_FF_IEM));
14671 }
14672 Assert(pVCpu->iem.s.cActiveMappings == 0);
14673 }
14674 else if (pVCpu->iem.s.cActiveMappings > 0)
14675 iemMemRollback(pVCpu);
14676 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14677 break;
14678 }
14679 }
14680#ifdef IEM_WITH_SETJMP
14681 else
14682 {
14683 if (pVCpu->iem.s.cActiveMappings > 0)
14684 iemMemRollback(pVCpu);
14685 pVCpu->iem.s.cLongJumps++;
14686 }
14687 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14688#endif
14689
14690 /*
14691 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14692 */
14693 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14694 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14695 }
14696 else
14697 {
14698 if (pVCpu->iem.s.cActiveMappings > 0)
14699 iemMemRollback(pVCpu);
14700
14701#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14702 /*
14703         * When a nested-guest triggers an exception intercept (e.g. #PF) while fetching
14704         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14705 */
14706 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14707#endif
14708 }
14709
14710 /*
14711 * Maybe re-enter raw-mode and log.
14712 */
14713#ifdef IN_RC
14714 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14715#endif
14716 if (rcStrict != VINF_SUCCESS)
14717 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14718 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14719 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14720 return rcStrict;
14721}
14722
14723
14724/**
14725 * Injects a trap, fault, abort, software interrupt or external interrupt.
14726 *
14727 * The parameter list matches TRPMQueryTrapAll pretty closely.
14728 *
14729 * @returns Strict VBox status code.
14730 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14731 * @param u8TrapNo The trap number.
14732 * @param   enmType             What type it is (trap/fault/abort), software
14733 * interrupt or hardware interrupt.
14734 * @param uErrCode The error code if applicable.
14735 * @param uCr2 The CR2 value if applicable.
14736 * @param cbInstr The instruction length (only relevant for
14737 * software interrupts).
14738 */
14739VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14740 uint8_t cbInstr)
14741{
14742 iemInitDecoder(pVCpu, false);
14743#ifdef DBGFTRACE_ENABLED
14744 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14745 u8TrapNo, enmType, uErrCode, uCr2);
14746#endif
14747
14748 uint32_t fFlags;
14749 switch (enmType)
14750 {
14751 case TRPM_HARDWARE_INT:
14752 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14753 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14754 uErrCode = uCr2 = 0;
14755 break;
14756
14757 case TRPM_SOFTWARE_INT:
14758 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14759 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14760 uErrCode = uCr2 = 0;
14761 break;
14762
14763 case TRPM_TRAP:
14764 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14765 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14766 if (u8TrapNo == X86_XCPT_PF)
14767 fFlags |= IEM_XCPT_FLAGS_CR2;
14768 switch (u8TrapNo)
14769 {
14770 case X86_XCPT_DF:
14771 case X86_XCPT_TS:
14772 case X86_XCPT_NP:
14773 case X86_XCPT_SS:
14774 case X86_XCPT_PF:
14775 case X86_XCPT_AC:
14776 fFlags |= IEM_XCPT_FLAGS_ERR;
14777 break;
14778 }
14779 break;
14780
14781 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14782 }
14783
14784 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14785
14786 if (pVCpu->iem.s.cActiveMappings > 0)
14787 iemMemRollback(pVCpu);
14788
14789 return rcStrict;
14790}
14791
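#if 0 /* Illustrative sketch only, not part of the build: injecting a guest page
         fault with an error code and faulting address via the API above.  The
         wrapper name is made up for the example; for hardware interrupts the
         error code, CR2 and instruction length are ignored, as the switch in
         IEMInjectTrap shows. */
static VBOXSTRICTRC exampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif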
14792
14793/**
14794 * Injects the active TRPM event.
14795 *
14796 * @returns Strict VBox status code.
14797 * @param pVCpu The cross context virtual CPU structure.
14798 */
14799VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14800{
14801#ifndef IEM_IMPLEMENTS_TASKSWITCH
14802 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14803#else
14804 uint8_t u8TrapNo;
14805 TRPMEVENT enmType;
14806 RTGCUINT uErrCode;
14807 RTGCUINTPTR uCr2;
14808 uint8_t cbInstr;
14809 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14810 if (RT_FAILURE(rc))
14811 return rc;
14812
14813 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14814#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14815 if (rcStrict == VINF_SVM_VMEXIT)
14816 rcStrict = VINF_SUCCESS;
14817#endif
14818#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14819 if (rcStrict == VINF_VMX_VMEXIT)
14820 rcStrict = VINF_SUCCESS;
14821#endif
14822 /** @todo Are there any other codes that imply the event was successfully
14823 * delivered to the guest? See @bugref{6607}. */
14824 if ( rcStrict == VINF_SUCCESS
14825 || rcStrict == VINF_IEM_RAISED_XCPT)
14826 TRPMResetTrap(pVCpu);
14827
14828 return rcStrict;
14829#endif
14830}
14831
14832
14833VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14834{
14835 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14836 return VERR_NOT_IMPLEMENTED;
14837}
14838
14839
14840VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14841{
14842 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14843 return VERR_NOT_IMPLEMENTED;
14844}
14845
14846
14847#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14848/**
14849 * Executes an IRET instruction with the default operand size.
14850 *
14851 * This is for PATM.
14852 *
14853 * @returns VBox status code.
14854 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14855 * @param pCtxCore The register frame.
14856 */
14857VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14858{
14859 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14860
14861 iemCtxCoreToCtx(pCtx, pCtxCore);
14862 iemInitDecoder(pVCpu);
14863 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14864 if (rcStrict == VINF_SUCCESS)
14865 iemCtxToCtxCore(pCtxCore, pCtx);
14866 else
14867 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14868                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14869 return rcStrict;
14870}
14871#endif
14872
14873
14874/**
14875 * Macro used by the IEMExec* method to check the given instruction length.
14876 *
14877 * Will return on failure!
14878 *
14879 * @param a_cbInstr The given instruction length.
14880 * @param a_cbMin The minimum length.
14881 */
14882#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14883 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14884 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14885
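/* Worked example (illustrative): with a_cbMin = 2 the check above becomes
   (cbInstr - 2) <= 13 in unsigned arithmetic, which accepts lengths 2..15.
   A too short length such as cbInstr = 1 wraps around to 0xffffffff and fails
   the same compare, so both bounds are enforced by a single comparison. */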
14886
14887/**
14888 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14889 *
14890 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14891 *
14892 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14894 * @param rcStrict The status code to fiddle.
14895 */
14896DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14897{
14898 iemUninitExec(pVCpu);
14899#ifdef IN_RC
14900 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14901#else
14902 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14903#endif
14904}
14905
14906
14907/**
14908 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14909 *
14910 * This API ASSUMES that the caller has already verified that the guest code is
14911 * allowed to access the I/O port. (The I/O port is in the DX register in the
14912 * guest state.)
14913 *
14914 * @returns Strict VBox status code.
14915 * @param pVCpu The cross context virtual CPU structure.
14916 * @param cbValue The size of the I/O port access (1, 2, or 4).
14917 * @param enmAddrMode The addressing mode.
14918 * @param fRepPrefix Indicates whether a repeat prefix is used
14919 * (doesn't matter which for this instruction).
14920 * @param cbInstr The instruction length in bytes.
14921 * @param   iEffSeg             The effective segment register number (X86_SREG_XXX).
14922 * @param fIoChecked Whether the access to the I/O port has been
14923 * checked or not. It's typically checked in the
14924 * HM scenario.
14925 */
14926VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14927 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14928{
14929 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14930 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14931
14932 /*
14933 * State init.
14934 */
14935 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14936
14937 /*
14938 * Switch orgy for getting to the right handler.
14939 */
14940 VBOXSTRICTRC rcStrict;
14941 if (fRepPrefix)
14942 {
14943 switch (enmAddrMode)
14944 {
14945 case IEMMODE_16BIT:
14946 switch (cbValue)
14947 {
14948 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14949 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14950 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14951 default:
14952 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14953 }
14954 break;
14955
14956 case IEMMODE_32BIT:
14957 switch (cbValue)
14958 {
14959 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14960 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14961 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14962 default:
14963 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14964 }
14965 break;
14966
14967 case IEMMODE_64BIT:
14968 switch (cbValue)
14969 {
14970 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14971 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14972 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14973 default:
14974 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14975 }
14976 break;
14977
14978 default:
14979 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14980 }
14981 }
14982 else
14983 {
14984 switch (enmAddrMode)
14985 {
14986 case IEMMODE_16BIT:
14987 switch (cbValue)
14988 {
14989 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14990 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14991 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14992 default:
14993 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14994 }
14995 break;
14996
14997 case IEMMODE_32BIT:
14998 switch (cbValue)
14999 {
15000 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15001 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15002 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15003 default:
15004 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15005 }
15006 break;
15007
15008 case IEMMODE_64BIT:
15009 switch (cbValue)
15010 {
15011 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15012 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15013 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15014 default:
15015 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15016 }
15017 break;
15018
15019 default:
15020 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15021 }
15022 }
15023
15024 if (pVCpu->iem.s.cActiveMappings)
15025 iemMemRollback(pVCpu);
15026
15027 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15028}
15029
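#if 0 /* Illustrative sketch only, not part of the build: how an I/O VM-exit
         handler might forward a decoded "rep outsd" to the API above.  The
         wrapper name is made up and cbInstr is assumed to come from the exit
         information; the port itself is taken from guest DX by the API. */
static VBOXSTRICTRC exampleForwardRepOutsd(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 4 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif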
15030
15031/**
15032 * Interface for HM and EM for executing string I/O IN (read) instructions.
15033 *
15034 * This API ASSUMES that the caller has already verified that the guest code is
15035 * allowed to access the I/O port. (The I/O port is in the DX register in the
15036 * guest state.)
15037 *
15038 * @returns Strict VBox status code.
15039 * @param pVCpu The cross context virtual CPU structure.
15040 * @param cbValue The size of the I/O port access (1, 2, or 4).
15041 * @param enmAddrMode The addressing mode.
15042 * @param fRepPrefix Indicates whether a repeat prefix is used
15043 * (doesn't matter which for this instruction).
15044 * @param cbInstr The instruction length in bytes.
15045 * @param fIoChecked Whether the access to the I/O port has been
15046 * checked or not. It's typically checked in the
15047 * HM scenario.
15048 */
15049VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15050 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15051{
15052 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15053
15054 /*
15055 * State init.
15056 */
15057 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15058
15059 /*
15060 * Switch orgy for getting to the right handler.
15061 */
15062 VBOXSTRICTRC rcStrict;
15063 if (fRepPrefix)
15064 {
15065 switch (enmAddrMode)
15066 {
15067 case IEMMODE_16BIT:
15068 switch (cbValue)
15069 {
15070 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15071 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15072 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15073 default:
15074 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15075 }
15076 break;
15077
15078 case IEMMODE_32BIT:
15079 switch (cbValue)
15080 {
15081 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15082 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15083 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15084 default:
15085 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15086 }
15087 break;
15088
15089 case IEMMODE_64BIT:
15090 switch (cbValue)
15091 {
15092 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15093 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15094 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15095 default:
15096 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15097 }
15098 break;
15099
15100 default:
15101 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15102 }
15103 }
15104 else
15105 {
15106 switch (enmAddrMode)
15107 {
15108 case IEMMODE_16BIT:
15109 switch (cbValue)
15110 {
15111 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15112 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15113 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15114 default:
15115 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15116 }
15117 break;
15118
15119 case IEMMODE_32BIT:
15120 switch (cbValue)
15121 {
15122 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15123 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15124 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15125 default:
15126 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15127 }
15128 break;
15129
15130 case IEMMODE_64BIT:
15131 switch (cbValue)
15132 {
15133 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15134 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15135 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15136 default:
15137 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15138 }
15139 break;
15140
15141 default:
15142 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15143 }
15144 }
15145
15146 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15147 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15148}
15149
15150
15151/**
15152 * Interface for raw-mode to execute an OUT instruction.
15153 *
15154 * @returns Strict VBox status code.
15155 * @param pVCpu The cross context virtual CPU structure.
15156 * @param cbInstr The instruction length in bytes.
15157 * @param   u16Port     The port to write to.
15158 * @param fImm Whether the port is specified using an immediate operand or
15159 * using the implicit DX register.
15160 * @param cbReg The register size.
15161 *
15162 * @remarks In ring-0 not all of the state needs to be synced in.
15163 */
15164VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15165{
15166 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15167 Assert(cbReg <= 4 && cbReg != 3);
15168
15169 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15170 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15171 Assert(!pVCpu->iem.s.cActiveMappings);
15172 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15173}
15174
15175
15176/**
15177 * Interface for raw-mode to execute an IN instruction.
15178 *
15179 * @returns Strict VBox status code.
15180 * @param pVCpu The cross context virtual CPU structure.
15181 * @param cbInstr The instruction length in bytes.
15182 * @param   u16Port     The port to read from.
15183 * @param fImm Whether the port is specified using an immediate operand or
15184 * using the implicit DX.
15185 * @param cbReg The register size.
15186 */
15187VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15188{
15189 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15190 Assert(cbReg <= 4 && cbReg != 3);
15191
15192 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15194 Assert(!pVCpu->iem.s.cActiveMappings);
15195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15196}
15197
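#if 0 /* Illustrative sketch only, not part of the build: forwarding decoded port
         accesses to the two APIs above.  The instruction lengths match the
         typical one/two byte encodings and 0x64 merely stands in for whatever
         DX holds; real callers take both from the decoded instruction. */
static VBOXSTRICTRC exampleForwardPortAccess(PVMCPU pVCpu)
{
    /* "out 0x80, al" - immediate port form, 2 byte encoding, 1 byte register. */
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80, true /*fImm*/, 1 /*cbReg*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* "in eax, dx" - implicit DX form, 1 byte encoding, 4 byte register. */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, 0x64 /*current DX value*/, false /*fImm*/, 4 /*cbReg*/);
}
#endif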
15198
15199/**
15200 * Interface for HM and EM to write to a CRx register.
15201 *
15202 * @returns Strict VBox status code.
15203 * @param pVCpu The cross context virtual CPU structure.
15204 * @param cbInstr The instruction length in bytes.
15205 * @param iCrReg The control register number (destination).
15206 * @param iGReg The general purpose register number (source).
15207 *
15208 * @remarks In ring-0 not all of the state needs to be synced in.
15209 */
15210VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15211{
15212 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15213 Assert(iCrReg < 16);
15214 Assert(iGReg < 16);
15215
15216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15218 Assert(!pVCpu->iem.s.cActiveMappings);
15219 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15220}
15221
15222
15223/**
15224 * Interface for HM and EM to read from a CRx register.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure.
15228 * @param cbInstr The instruction length in bytes.
15229 * @param iGReg The general purpose register number (destination).
15230 * @param iCrReg The control register number (source).
15231 *
15232 * @remarks In ring-0 not all of the state needs to be synced in.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15237 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15238 | CPUMCTX_EXTRN_APIC_TPR);
15239 Assert(iCrReg < 16);
15240 Assert(iGReg < 16);
15241
15242 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15244 Assert(!pVCpu->iem.s.cActiveMappings);
15245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15246}
15247
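#if 0 /* Illustrative sketch only, not part of the build: forwarding a CR4
         intercept to the two APIs above.  Note the swapped iCrReg/iGReg
         parameter order between the write and read variants.  The 3 byte
         length matches "mov cr4, rax" / "mov rax, cr4"; real callers take it
         from the exit information. */
static VBOXSTRICTRC exampleForwardMovCr4(PVMCPU pVCpu, bool fWrite)
{
    if (fWrite) /* mov cr4, rax: destination CR4 (iCrReg=4), source RAX (iGReg=0). */
        return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg*/, 0 /*iGReg*/);
    /* mov rax, cr4: destination RAX (iGReg=0), source CR4 (iCrReg=4). */
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg*/, 4 /*iCrReg*/);
}
#endif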
15248
15249/**
15250 * Interface for HM and EM to clear the CR0[TS] bit.
15251 *
15252 * @returns Strict VBox status code.
15253 * @param pVCpu The cross context virtual CPU structure.
15254 * @param cbInstr The instruction length in bytes.
15255 *
15256 * @remarks In ring-0 not all of the state needs to be synced in.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261
15262 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15263 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15264 Assert(!pVCpu->iem.s.cActiveMappings);
15265 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15266}
15267
15268
15269/**
15270 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15271 *
15272 * @returns Strict VBox status code.
15273 * @param pVCpu The cross context virtual CPU structure.
15274 * @param cbInstr The instruction length in bytes.
15275 * @param uValue The value to load into CR0.
15276 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15277 * memory operand. Otherwise pass NIL_RTGCPTR.
15278 *
15279 * @remarks In ring-0 not all of the state needs to be synced in.
15280 */
15281VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15282{
15283 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15284
15285 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15286 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15287 Assert(!pVCpu->iem.s.cActiveMappings);
15288 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15289}
15290
15291
15292/**
15293 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15294 *
15295 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15296 *
15297 * @returns Strict VBox status code.
15298 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15299 * @param cbInstr The instruction length in bytes.
15300 * @remarks In ring-0 not all of the state needs to be synced in.
15301 * @thread EMT(pVCpu)
15302 */
15303VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15304{
15305 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15306
15307 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15309 Assert(!pVCpu->iem.s.cActiveMappings);
15310 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15311}
15312
15313
15314/**
15315 * Interface for HM and EM to emulate the WBINVD instruction.
15316 *
15317 * @returns Strict VBox status code.
15318 * @param pVCpu The cross context virtual CPU structure.
15319 * @param cbInstr The instruction length in bytes.
15320 *
15321 * @remarks In ring-0 not all of the state needs to be synced in.
15322 */
15323VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15324{
15325 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15326
15327 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15328 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15329 Assert(!pVCpu->iem.s.cActiveMappings);
15330 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15331}
15332
15333
15334/**
15335 * Interface for HM and EM to emulate the INVD instruction.
15336 *
15337 * @returns Strict VBox status code.
15338 * @param pVCpu The cross context virtual CPU structure.
15339 * @param cbInstr The instruction length in bytes.
15340 *
15341 * @remarks In ring-0 not all of the state needs to be synced in.
15342 */
15343VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15344{
15345 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15346
15347 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15349 Assert(!pVCpu->iem.s.cActiveMappings);
15350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15351}
15352
15353
15354/**
15355 * Interface for HM and EM to emulate the INVLPG instruction.
15356 *
15357 * @returns Strict VBox status code.
15358 * @retval VINF_PGM_SYNC_CR3
15359 *
15360 * @param pVCpu The cross context virtual CPU structure.
15361 * @param cbInstr The instruction length in bytes.
15362 * @param GCPtrPage The effective address of the page to invalidate.
15363 *
15364 * @remarks In ring-0 not all of the state needs to be synced in.
15365 */
15366VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15367{
15368 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15369
15370 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15371 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15372 Assert(!pVCpu->iem.s.cActiveMappings);
15373 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15374}
15375
15376
15377/**
15378 * Interface for HM and EM to emulate the CPUID instruction.
15379 *
15380 * @returns Strict VBox status code.
15381 *
15382 * @param pVCpu The cross context virtual CPU structure.
15383 * @param cbInstr The instruction length in bytes.
15384 *
15385 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15386 */
15387VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15388{
15389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15390 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15391
15392 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15394 Assert(!pVCpu->iem.s.cActiveMappings);
15395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15396}
15397
15398
15399/**
15400 * Interface for HM and EM to emulate the RDPMC instruction.
15401 *
15402 * @returns Strict VBox status code.
15403 *
15404 * @param pVCpu The cross context virtual CPU structure.
15405 * @param cbInstr The instruction length in bytes.
15406 *
15407 * @remarks Not all of the state needs to be synced in.
15408 */
15409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15410{
15411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15412 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15413
15414 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15415 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15416 Assert(!pVCpu->iem.s.cActiveMappings);
15417 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15418}
15419
15420
15421/**
15422 * Interface for HM and EM to emulate the RDTSC instruction.
15423 *
15424 * @returns Strict VBox status code.
15425 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15426 *
15427 * @param pVCpu The cross context virtual CPU structure.
15428 * @param cbInstr The instruction length in bytes.
15429 *
15430 * @remarks Not all of the state needs to be synced in.
15431 */
15432VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15433{
15434 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15435 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15436
15437 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15438 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15439 Assert(!pVCpu->iem.s.cActiveMappings);
15440 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15441}
15442
15443
15444/**
15445 * Interface for HM and EM to emulate the RDTSCP instruction.
15446 *
15447 * @returns Strict VBox status code.
15448 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15449 *
15450 * @param pVCpu The cross context virtual CPU structure.
15451 * @param cbInstr The instruction length in bytes.
15452 *
15453 * @remarks Not all of the state needs to be synced in. Recommended
15454 *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15459 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15460
15461 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15463 Assert(!pVCpu->iem.s.cActiveMappings);
15464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15465}
15466
15467
15468/**
15469 * Interface for HM and EM to emulate the RDMSR instruction.
15470 *
15471 * @returns Strict VBox status code.
15472 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15473 *
15474 * @param pVCpu The cross context virtual CPU structure.
15475 * @param cbInstr The instruction length in bytes.
15476 *
15477 * @remarks Not all of the state needs to be synced in. Requires RCX and
15478 * (currently) all MSRs.
15479 */
15480VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15481{
15482 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15483 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15484
15485 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15486 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15487 Assert(!pVCpu->iem.s.cActiveMappings);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the WRMSR instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15497 *
15498 * @param pVCpu The cross context virtual CPU structure.
15499 * @param cbInstr The instruction length in bytes.
15500 *
15501 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15502 * and (currently) all MSRs.
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15508 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15509
15510 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15511 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15512 Assert(!pVCpu->iem.s.cActiveMappings);
15513 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15514}
15515
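#if 0 /* Illustrative sketch only, not part of the build: a typical MSR intercept
         forward using the two APIs above.  Both instructions are 2 bytes and
         take their inputs/outputs from guest RCX, RAX and RDX, so no operands
         are passed here. */
static VBOXSTRICTRC exampleForwardMsrAccess(PVMCPU pVCpu, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/)
         : IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
}
#endif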
15516
15517/**
15518 * Interface for HM and EM to emulate the MONITOR instruction.
15519 *
15520 * @returns Strict VBox status code.
15521 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15522 *
15523 * @param pVCpu The cross context virtual CPU structure.
15524 * @param cbInstr The instruction length in bytes.
15525 *
15526 * @remarks Not all of the state needs to be synced in.
15527 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15528 * are used.
15529 */
15530VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15531{
15532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15533 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15534
15535 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15537 Assert(!pVCpu->iem.s.cActiveMappings);
15538 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15539}
15540
15541
15542/**
15543 * Interface for HM and EM to emulate the MWAIT instruction.
15544 *
15545 * @returns Strict VBox status code.
15546 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15547 *
15548 * @param pVCpu The cross context virtual CPU structure.
15549 * @param cbInstr The instruction length in bytes.
15550 *
15551 * @remarks Not all of the state needs to be synced in.
15552 */
15553VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15554{
15555 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15556
15557 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15558 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15559 Assert(!pVCpu->iem.s.cActiveMappings);
15560 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15561}
15562
15563
15564/**
15565 * Interface for HM and EM to emulate the HLT instruction.
15566 *
15567 * @returns Strict VBox status code.
15568 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15569 *
15570 * @param pVCpu The cross context virtual CPU structure.
15571 * @param cbInstr The instruction length in bytes.
15572 *
15573 * @remarks Not all of the state needs to be synced in.
15574 */
15575VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15576{
15577 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15578
15579 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15580 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15581 Assert(!pVCpu->iem.s.cActiveMappings);
15582 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15583}
15584
15585
15586/**
15587 * Checks if IEM is in the process of delivering an event (interrupt or
15588 * exception).
15589 *
15590 * @returns true if we're in the process of raising an interrupt or exception,
15591 * false otherwise.
15592 * @param pVCpu The cross context virtual CPU structure.
15593 * @param puVector Where to store the vector associated with the
15594 * currently delivered event, optional.
15595 * @param   pfFlags         Where to store the event delivery flags (see
15596 * IEM_XCPT_FLAGS_XXX), optional.
15597 * @param puErr Where to store the error code associated with the
15598 * event, optional.
15599 * @param puCr2 Where to store the CR2 associated with the event,
15600 * optional.
15601 * @remarks The caller should check the flags to determine if the error code and
15602 * CR2 are valid for the event.
15603 */
15604VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15605{
15606 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15607 if (fRaisingXcpt)
15608 {
15609 if (puVector)
15610 *puVector = pVCpu->iem.s.uCurXcpt;
15611 if (pfFlags)
15612 *pfFlags = pVCpu->iem.s.fCurXcpt;
15613 if (puErr)
15614 *puErr = pVCpu->iem.s.uCurXcptErr;
15615 if (puCr2)
15616 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15617 }
15618 return fRaisingXcpt;
15619}
15620
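#if 0 /* Illustrative sketch only, not part of the build: querying the event IEM
         is currently delivering, e.g. from a nested page-fault path.  The flag
         check mirrors the remark above about validating the error code before
         using it. */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " (with error code)" : ""));
}
#endif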
15621#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15622
15623/**
15624 * Interface for HM and EM to emulate the CLGI instruction.
15625 *
15626 * @returns Strict VBox status code.
15627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15628 * @param cbInstr The instruction length in bytes.
15629 * @thread EMT(pVCpu)
15630 */
15631VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15632{
15633 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15634
15635 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15636 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15637 Assert(!pVCpu->iem.s.cActiveMappings);
15638 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15639}
15640
15641
15642/**
15643 * Interface for HM and EM to emulate the STGI instruction.
15644 *
15645 * @returns Strict VBox status code.
15646 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15647 * @param cbInstr The instruction length in bytes.
15648 * @thread EMT(pVCpu)
15649 */
15650VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15651{
15652 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15653
15654 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15655 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15656 Assert(!pVCpu->iem.s.cActiveMappings);
15657 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15658}
15659
15660
15661/**
15662 * Interface for HM and EM to emulate the VMLOAD instruction.
15663 *
15664 * @returns Strict VBox status code.
15665 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15666 * @param cbInstr The instruction length in bytes.
15667 * @thread EMT(pVCpu)
15668 */
15669VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15670{
15671 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15672
15673 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15674 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15675 Assert(!pVCpu->iem.s.cActiveMappings);
15676 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15677}
15678
15679
15680/**
15681 * Interface for HM and EM to emulate the VMSAVE instruction.
15682 *
15683 * @returns Strict VBox status code.
15684 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15685 * @param cbInstr The instruction length in bytes.
15686 * @thread EMT(pVCpu)
15687 */
15688VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15689{
15690 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15691
15692 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15693 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15694 Assert(!pVCpu->iem.s.cActiveMappings);
15695 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15696}
15697
15698
15699/**
15700 * Interface for HM and EM to emulate the INVLPGA instruction.
15701 *
15702 * @returns Strict VBox status code.
15703 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15704 * @param cbInstr The instruction length in bytes.
15705 * @thread EMT(pVCpu)
15706 */
15707VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15708{
15709 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15710
15711 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15712 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15713 Assert(!pVCpu->iem.s.cActiveMappings);
15714 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15715}
15716
15717
15718/**
15719 * Interface for HM and EM to emulate the VMRUN instruction.
15720 *
15721 * @returns Strict VBox status code.
15722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15723 * @param cbInstr The instruction length in bytes.
15724 * @thread EMT(pVCpu)
15725 */
15726VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15727{
15728 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15729 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15730
15731 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15732 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15733 Assert(!pVCpu->iem.s.cActiveMappings);
15734 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15735}
15736
15737
15738/**
15739 * Interface for HM and EM to emulate \#VMEXIT.
15740 *
15741 * @returns Strict VBox status code.
15742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15743 * @param uExitCode The exit code.
15744 * @param uExitInfo1 The exit info. 1 field.
15745 * @param uExitInfo2 The exit info. 2 field.
15746 * @thread EMT(pVCpu)
15747 */
15748VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15749{
15750 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15751 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15752 if (pVCpu->iem.s.cActiveMappings)
15753 iemMemRollback(pVCpu);
15754 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15755}
15756
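#if 0 /* Illustrative sketch only, not part of the build: signalling a nested-guest
         #VMEXIT that carries no extra exit information, e.g. for a physical
         interrupt intercept.  uExitCode would be one of the SVM_EXIT_XXX values. */
static VBOXSTRICTRC exampleSignalSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode)
{
    return IEMExecSvmVmexit(pVCpu, uExitCode, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif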
15757#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15758
15759#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15760
15761/**
15762 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15763 *
15764 * @returns Strict VBox status code.
15765 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15766 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15767 * the x2APIC device.
15768 * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15769 *
15770 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15771 * @param idMsr The MSR being read.
15772 * @param pu64Value Pointer to the value being written or where to store the
15773 * value being read.
15774 * @param fWrite Whether this is an MSR write or read access.
15775 * @thread EMT(pVCpu)
15776 */
15777VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15778{
15779 Assert(pu64Value);
15780
15781 VBOXSTRICTRC rcStrict;
15782 if (!fWrite)
15783 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15784 else
15785 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15786 if (pVCpu->iem.s.cActiveMappings)
15787 iemMemRollback(pVCpu);
15788 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15789
15790}
15791
15792
15793/**
15794 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15795 *
15796 * @returns Strict VBox status code.
15797 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15798 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15799 *
15800 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15801 * @param offAccess The offset of the register being accessed (within the
15802 * APIC-access page).
15803 * @param cbAccess The size of the access in bytes.
15804 * @param pvData Pointer to the data being written or where to store the data
15805 * being read.
15806 * @param fWrite Whether this is a write or read access.
15807 * @thread EMT(pVCpu)
15808 */
15809VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15810 bool fWrite)
15811{
15812 Assert(pvData);
15813
15814 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15815 * accesses, so we only use read/write here. Maybe in the future the PGM
15816 * physical handler will be extended to include this information? */
15817 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15818 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15819 if (pVCpu->iem.s.cActiveMappings)
15820 iemMemRollback(pVCpu);
15821 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15822}
15823
15824
15825/**
15826 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15827 * VM-exit.
15828 *
15829 * @returns Strict VBox status code.
15830 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15831 * @thread EMT(pVCpu)
15832 */
15833VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15834{
15835 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15836 if (pVCpu->iem.s.cActiveMappings)
15837 iemMemRollback(pVCpu);
15838 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15839}
15840
15841
15842/**
15843 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15844 *
15845 * @returns Strict VBox status code.
15846 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15847 * @thread EMT(pVCpu)
15848 */
15849VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15850{
15851 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15852 if (pVCpu->iem.s.cActiveMappings)
15853 iemMemRollback(pVCpu);
15854 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15855}
15856
15857
15858/**
15859 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15860 *
15861 * @returns Strict VBox status code.
15862 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15863 * @param uVector The external interrupt vector (pass 0 if the external
15864 * interrupt is still pending).
15865 * @param fIntPending Whether the external interrupt is pending or
15866 *                      acknowledged in the interrupt controller.
15867 * @thread EMT(pVCpu)
15868 */
15869VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15870{
15871 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15872 if (pVCpu->iem.s.cActiveMappings)
15873 iemMemRollback(pVCpu);
15874 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15875}
15876
15877
15878/**
15879 * Interface for HM and EM to emulate VM-exit due to NMIs.
15880 *
15881 * @returns Strict VBox status code.
15882 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15883 * @thread EMT(pVCpu)
15884 */
15885VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmi(PVMCPU pVCpu)
15886{
15887 VBOXSTRICTRC rcStrict = iemVmxVmexitNmi(pVCpu);
15888 if (pVCpu->iem.s.cActiveMappings)
15889 iemMemRollback(pVCpu);
15890 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15891}
15892
15893
15894/**
15895 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15896 *
15897 * @returns Strict VBox status code.
15898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15899 * @param uVector The SIPI vector.
15900 * @thread EMT(pVCpu)
15901 */
15902VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15903{
15904 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15905 if (pVCpu->iem.s.cActiveMappings)
15906 iemMemRollback(pVCpu);
15907 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15908}
15909
15910
15911/**
15912 * Interface for HM and EM to emulate a VM-exit.
15913 *
15914 * If a specialized version of a VM-exit handler exists, that must be used instead.
15915 *
15916 * @returns Strict VBox status code.
15917 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15918 * @param uExitReason The VM-exit reason.
15919 * @thread EMT(pVCpu)
15920 *
15921 * @remarks It is the responsibility of the caller to ensure the VM-exit qualification
15922 * is updated prior to calling this function!
15923 */
15924VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPU pVCpu, uint32_t uExitReason)
15925{
15926 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason);
15927 if (pVCpu->iem.s.cActiveMappings)
15928 iemMemRollback(pVCpu);
15929 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15930}
15931
15932
15933/**
15934 * Interface for HM and EM to emulate the VMREAD instruction.
15935 *
15936 * @returns Strict VBox status code.
15937 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15938 * @param pExitInfo Pointer to the VM-exit information struct.
15939 * @thread EMT(pVCpu)
15940 */
15941VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15942{
15943 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15944 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15945 Assert(pExitInfo);
15946
15947 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15948
15949 VBOXSTRICTRC rcStrict;
15950 uint8_t const cbInstr = pExitInfo->cbInstr;
15951 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15952 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15953 {
15954 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15955 {
15956 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15957 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15958 }
15959 else
15960 {
15961 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15962 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15963 }
15964 }
15965 else
15966 {
15967 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15968 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15969 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15970 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15971 }
15972 Assert(!pVCpu->iem.s.cActiveMappings);
15973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15974}
15975
15976
15977/**
15978 * Interface for HM and EM to emulate the VMWRITE instruction.
15979 *
15980 * @returns Strict VBox status code.
15981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15982 * @param pExitInfo Pointer to the VM-exit information struct.
15983 * @thread EMT(pVCpu)
15984 */
15985VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15986{
15987 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15988 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15989 Assert(pExitInfo);
15990
15991 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15992
15993 uint64_t u64Val;
15994 uint8_t iEffSeg;
15995 IEMMODE enmEffAddrMode;
15996 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15997 {
15998 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15999 iEffSeg = UINT8_MAX;
16000 enmEffAddrMode = UINT8_MAX;
16001 }
16002 else
16003 {
16004 u64Val = pExitInfo->GCPtrEffAddr;
16005 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16006 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16007 }
16008 uint8_t const cbInstr = pExitInfo->cbInstr;
16009 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16010 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16011 Assert(!pVCpu->iem.s.cActiveMappings);
16012 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16013}
16014
16015
16016/**
16017 * Interface for HM and EM to emulate the VMPTRLD instruction.
16018 *
16019 * @returns Strict VBox status code.
16020 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16021 * @param pExitInfo Pointer to the VM-exit information struct.
16022 * @thread EMT(pVCpu)
16023 */
16024VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16025{
16026 Assert(pExitInfo);
16027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16028 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16029
16030 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16031
16032 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16033 uint8_t const cbInstr = pExitInfo->cbInstr;
16034 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16035 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16036 Assert(!pVCpu->iem.s.cActiveMappings);
16037 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16038}
16039
16040
16041/**
16042 * Interface for HM and EM to emulate the VMPTRST instruction.
16043 *
16044 * @returns Strict VBox status code.
16045 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16046 * @param pExitInfo Pointer to the VM-exit information struct.
16047 * @thread EMT(pVCpu)
16048 */
16049VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16050{
16051 Assert(pExitInfo);
16052 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16053 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16054
16055 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16056
16057 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16058 uint8_t const cbInstr = pExitInfo->cbInstr;
16059 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16060 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16061 Assert(!pVCpu->iem.s.cActiveMappings);
16062 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16063}
16064
16065
16066/**
16067 * Interface for HM and EM to emulate the VMCLEAR instruction.
16068 *
16069 * @returns Strict VBox status code.
16070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16071 * @param pExitInfo Pointer to the VM-exit information struct.
16072 * @thread EMT(pVCpu)
16073 */
16074VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16075{
16076 Assert(pExitInfo);
16077 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16078 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16079
16080 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16081
16082 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16083 uint8_t const cbInstr = pExitInfo->cbInstr;
16084 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16085 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16086 Assert(!pVCpu->iem.s.cActiveMappings);
16087 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16088}
16089
16090
16091/**
16092 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16093 *
16094 * @returns Strict VBox status code.
16095 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16096 * @param cbInstr The instruction length in bytes.
16097 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16098 * VMXINSTRID_VMRESUME).
16099 * @thread EMT(pVCpu)
16100 */
16101VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16102{
16103 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16104 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16105
16106 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16107 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16108 Assert(!pVCpu->iem.s.cActiveMappings);
16109 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16110}
16111
16112
16113/**
16114 * Interface for HM and EM to emulate the VMXON instruction.
16115 *
16116 * @returns Strict VBox status code.
16117 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16118 * @param pExitInfo Pointer to the VM-exit information struct.
16119 * @thread EMT(pVCpu)
16120 */
16121VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16122{
16123 Assert(pExitInfo);
16124 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16125 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16126
16127 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16128
16129 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16130 uint8_t const cbInstr = pExitInfo->cbInstr;
16131 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16132 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16133 Assert(!pVCpu->iem.s.cActiveMappings);
16134 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16135}
16136
16137
16138/**
16139 * Interface for HM and EM to emulate the VMXOFF instruction.
16140 *
16141 * @returns Strict VBox status code.
16142 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16143 * @param cbInstr The instruction length in bytes.
16144 * @thread EMT(pVCpu)
16145 */
16146VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16147{
16148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16150
16151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16152 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16153 Assert(!pVCpu->iem.s.cActiveMappings);
16154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16155}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
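
/*
 * Worked example: for a hypothetical access faulting at guest-physical 0xfee00300 on a
 * 4 KiB APIC-access page, the masking above yields GCPhysAccessBase = 0xfee00000 and
 * offAccess = 0x300 (the xAPIC ICR low offset), which is what iemVmxVirtApicAccessMem
 * is asked to virtualize.
 */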

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
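
/*
 * Worked example of the merge rules above; the status codes are real VBox codes, the
 * calls are hypothetical and never built.
 */
#if 0 /* illustration only, not built */
static void exampleMergeStatus(PVMCPU pVCpu)
{
    /* A pending commit status overrides a plain success. */
    Assert(iemR3MergeStatus(VINF_SUCCESS,      VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu) == VINF_EM_RESCHEDULE);
    /* VINF_EM_RAW_TO_R3 only brought us to ring-3 to do the commit, so the commit status wins. */
    Assert(iemR3MergeStatus(VINF_EM_RAW_TO_R3, VINF_SUCCESS,       0 /*iMemMap*/, pVCpu) == VINF_SUCCESS);
    /* Two EM scheduling codes merge to the numerically smaller (higher priority) one. */
}
#endif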


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
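
/*
 * Illustrative sketch: the ring-3 side that notices VMCPU_FF_IEM after returning from
 * ring-0 hands the status through here.  The wrapper function is hypothetical.
 */
#if 0 /* illustration only, not built */
static VBOXSTRICTRC exampleProcessReturnFromRing0(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict); /* commits the pending bounce buffer writes */
    return rcStrict;
}
#endif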

#endif /* IN_RING3 */