VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@76811

Last change on this file since 76811 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 644.3 KB
1/* $Id: IEMAll.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of the IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
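/*
 * Illustrative example (editor's addition, not part of the original file): how the
 * logging levels documented above are typically exercised. The format strings and
 * variable names below are made up for illustration; only the Log/Log4/Log8 macros
 * themselves come from VBox/log.h.
 *
 *     Log(("IEM: #GP(0) at %04x:%RGv\n", uCs, GCPtrPC));          // level 1: major events
 *     Log4(("decode - %04x:%RGv: xor eax, eax\n", uCs, GCPtrPC)); // level 4: mnemonics w/ EIP
 *     Log8(("IEM WR %RGv LB %#zx\n", GCPtrMem, cbMem));           // level 8: memory writes
 */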
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
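/*
 * Sketch (editor's addition): how the FNIEMOP_DEF/FNIEMOPRM_DEF definition macros pair
 * up with the FNIEMOP_CALL invocation macros further down. The opcode functions below
 * are hypothetical and kept inside an #if 0 block purely for illustration.
 */
#if 0
FNIEMOP_DEF(iemOp_example_nop)
{
    /* Decoding/execution work would go here; decoders return a strict VBox status. */
    return VINF_SUCCESS;
}

FNIEMOPRM_DEF(iemOp_example_dispatch)
{
    /* A table entry taking the ModR/M byte dispatches to another decoder like this: */
    NOREF(bRm);
    return FNIEMOP_CALL(iemOp_example_nop);
}
#endif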
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
240
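/*
 * Conceptual sketch (editor's addition; the variable names are assumptions, not taken
 * from this file): with IEM_WITH_SETJMP defined, the fault-raising helpers longjmp back
 * to a setjmp established around the decode/dispatch loop instead of returning a
 * VBOXSTRICTRC that every caller has to check:
 *
 *     jmp_buf JmpBuf;
 *     if (setjmp(JmpBuf) == 0)
 *         rcStrict = FNIEMOP_CALL(pfnOp);   // helpers longjmp on #PF, #GP, etc.
 *     else
 *         rcStrict = rcFromLongJmp;         // status code recovered after the unwind
 */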
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
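/*
 * Usage sketch (editor's addition; the iemDoSomethingXX helpers are hypothetical): the
 * two macros above slot in as the default case of a switch that is known to cover every
 * value of the enum:
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: return iemDoSomething16(pVCpu);
 *         case IEMMODE_32BIT: return iemDoSomething32(pVCpu);
 *         case IEMMODE_64BIT: return iemDoSomething64(pVCpu);
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */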
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles the SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
742
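/*
 * Note (editor's addition): this table is indexed by the ModR/M reg field of the group 1
 * opcodes (0x80..0x83), so reg=0 selects ADD, reg=5 selects SUB, and so on, matching the
 * order above. A decoder would pick the implementation roughly like this:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   // ModR/M reg field, bits 5:3
 */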
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
991IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
993#endif
994
995#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
996IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
997IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
998#endif
999
1000
1001/**
1002 * Sets the pass up status.
1003 *
1004 * @returns VINF_SUCCESS.
1005 * @param pVCpu The cross context virtual CPU structure of the
1006 * calling thread.
1007 * @param rcPassUp The pass up status. Must be informational.
1008 * VINF_SUCCESS is not allowed.
1009 */
1010IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1011{
1012 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1013
1014 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1015 if (rcOldPassUp == VINF_SUCCESS)
1016 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1017 /* If both are EM scheduling codes, use EM priority rules. */
1018 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1019 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1020 {
1021 if (rcPassUp < rcOldPassUp)
1022 {
1023 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1024 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1025 }
1026 else
1027 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1028 }
1029 /* Override EM scheduling with specific status code. */
1030 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1031 {
1032 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1033 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1034 }
1035 /* Don't override specific status code, first come first served. */
1036 else
1037 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1038 return VINF_SUCCESS;
1039}
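/*
 * Note (editor's addition): EM's convention is that numerically lower scheduling status
 * codes carry higher priority, which is why the "rcPassUp < rcOldPassUp" comparison above
 * keeps whichever of the two codes EM would consider more important.
 */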
1040
1041
1042/**
1043 * Calculates the CPU mode.
1044 *
1045 * This is mainly for updating IEMCPU::enmCpuMode.
1046 *
1047 * @returns CPU mode.
1048 * @param pVCpu The cross context virtual CPU structure of the
1049 * calling thread.
1050 */
1051DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1052{
1053 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1054 return IEMMODE_64BIT;
1055 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1056 return IEMMODE_32BIT;
1057 return IEMMODE_16BIT;
1058}
1059
1060
1061/**
1062 * Initializes the execution state.
1063 *
1064 * @param pVCpu The cross context virtual CPU structure of the
1065 * calling thread.
1066 * @param fBypassHandlers Whether to bypass access handlers.
1067 *
1068 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1069 * side-effects in strict builds.
1070 */
1071DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1072{
1073 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1074 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1075
1076#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1085#endif
1086
1087#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1088 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1089#endif
1090 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1091 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1092#ifdef VBOX_STRICT
1093 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1094 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1095 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1096 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1097 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1098 pVCpu->iem.s.uRexReg = 127;
1099 pVCpu->iem.s.uRexB = 127;
1100 pVCpu->iem.s.offModRm = 127;
1101 pVCpu->iem.s.uRexIndex = 127;
1102 pVCpu->iem.s.iEffSeg = 127;
1103 pVCpu->iem.s.idxPrefix = 127;
1104 pVCpu->iem.s.uVex3rdReg = 127;
1105 pVCpu->iem.s.uVexLength = 127;
1106 pVCpu->iem.s.fEvexStuff = 127;
1107 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1108# ifdef IEM_WITH_CODE_TLB
1109 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1110 pVCpu->iem.s.pbInstrBuf = NULL;
1111 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1112 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1113 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1114 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1115# else
1116 pVCpu->iem.s.offOpcode = 127;
1117 pVCpu->iem.s.cbOpcode = 127;
1118# endif
1119#endif
1120
1121 pVCpu->iem.s.cActiveMappings = 0;
1122 pVCpu->iem.s.iNextMapping = 0;
1123 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1124 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1125#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1126 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1127 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1128 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1129 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1130 if (!pVCpu->iem.s.fInPatchCode)
1131 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1132#endif
1133}
1134
1135#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1136/**
1137 * Performs a minimal reinitialization of the execution state.
1138 *
1139 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1140 * 'world-switch' type operations on the CPU. Currently only nested
1141 * hardware-virtualization uses it.
1142 *
1143 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1144 */
1145IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1146{
1147 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1148 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1149
1150 pVCpu->iem.s.uCpl = uCpl;
1151 pVCpu->iem.s.enmCpuMode = enmMode;
1152 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1153 pVCpu->iem.s.enmEffAddrMode = enmMode;
1154 if (enmMode != IEMMODE_64BIT)
1155 {
1156 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1157 pVCpu->iem.s.enmEffOpSize = enmMode;
1158 }
1159 else
1160 {
1161 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifndef IEM_WITH_CODE_TLB
1166 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1167 pVCpu->iem.s.offOpcode = 0;
1168 pVCpu->iem.s.cbOpcode = 0;
1169#endif
1170 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1171}
1172#endif
1173
1174/**
1175 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1176 *
1177 * @param pVCpu The cross context virtual CPU structure of the
1178 * calling thread.
1179 */
1180DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1181{
1182 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1183#ifdef VBOX_STRICT
1184# ifdef IEM_WITH_CODE_TLB
1185 NOREF(pVCpu);
1186# else
1187 pVCpu->iem.s.cbOpcode = 0;
1188# endif
1189#else
1190 NOREF(pVCpu);
1191#endif
1192}
1193
1194
1195/**
1196 * Initializes the decoder state.
1197 *
1198 * iemReInitDecoder is mostly a copy of this function.
1199 *
1200 * @param pVCpu The cross context virtual CPU structure of the
1201 * calling thread.
1202 * @param fBypassHandlers Whether to bypass access handlers.
1203 */
1204DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1205{
1206 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1207 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1208
1209#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1218#endif
1219
1220#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1221 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1222#endif
1223 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1224 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1225 pVCpu->iem.s.enmCpuMode = enmMode;
1226 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1227 pVCpu->iem.s.enmEffAddrMode = enmMode;
1228 if (enmMode != IEMMODE_64BIT)
1229 {
1230 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1231 pVCpu->iem.s.enmEffOpSize = enmMode;
1232 }
1233 else
1234 {
1235 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1236 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1237 }
1238 pVCpu->iem.s.fPrefixes = 0;
1239 pVCpu->iem.s.uRexReg = 0;
1240 pVCpu->iem.s.uRexB = 0;
1241 pVCpu->iem.s.uRexIndex = 0;
1242 pVCpu->iem.s.idxPrefix = 0;
1243 pVCpu->iem.s.uVex3rdReg = 0;
1244 pVCpu->iem.s.uVexLength = 0;
1245 pVCpu->iem.s.fEvexStuff = 0;
1246 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1247#ifdef IEM_WITH_CODE_TLB
1248 pVCpu->iem.s.pbInstrBuf = NULL;
1249 pVCpu->iem.s.offInstrNextByte = 0;
1250 pVCpu->iem.s.offCurInstrStart = 0;
1251# ifdef VBOX_STRICT
1252 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1253 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1254 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1255# endif
1256#else
1257 pVCpu->iem.s.offOpcode = 0;
1258 pVCpu->iem.s.cbOpcode = 0;
1259#endif
1260 pVCpu->iem.s.offModRm = 0;
1261 pVCpu->iem.s.cActiveMappings = 0;
1262 pVCpu->iem.s.iNextMapping = 0;
1263 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1264 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1265#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1266 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1267 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1268 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1269 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1270 if (!pVCpu->iem.s.fInPatchCode)
1271 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1272#endif
1273
1274#ifdef DBGFTRACE_ENABLED
1275 switch (enmMode)
1276 {
1277 case IEMMODE_64BIT:
1278 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1279 break;
1280 case IEMMODE_32BIT:
1281 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1282 break;
1283 case IEMMODE_16BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1285 break;
1286 }
1287#endif
1288}
1289
1290
1291/**
1292 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1293 *
1294 * This is mostly a copy of iemInitDecoder.
1295 *
1296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1297 */
1298DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1299{
1300 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1301
1302#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1311#endif
1312
1313 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1314 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1315 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1316 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1317 pVCpu->iem.s.enmEffAddrMode = enmMode;
1318 if (enmMode != IEMMODE_64BIT)
1319 {
1320 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1321 pVCpu->iem.s.enmEffOpSize = enmMode;
1322 }
1323 else
1324 {
1325 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1326 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1327 }
1328 pVCpu->iem.s.fPrefixes = 0;
1329 pVCpu->iem.s.uRexReg = 0;
1330 pVCpu->iem.s.uRexB = 0;
1331 pVCpu->iem.s.uRexIndex = 0;
1332 pVCpu->iem.s.idxPrefix = 0;
1333 pVCpu->iem.s.uVex3rdReg = 0;
1334 pVCpu->iem.s.uVexLength = 0;
1335 pVCpu->iem.s.fEvexStuff = 0;
1336 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1337#ifdef IEM_WITH_CODE_TLB
1338 if (pVCpu->iem.s.pbInstrBuf)
1339 {
1340 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1341 - pVCpu->iem.s.uInstrBufPc;
1342 if (off < pVCpu->iem.s.cbInstrBufTotal)
1343 {
1344 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1345 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1346 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1347 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1348 else
1349 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1350 }
1351 else
1352 {
1353 pVCpu->iem.s.pbInstrBuf = NULL;
1354 pVCpu->iem.s.offInstrNextByte = 0;
1355 pVCpu->iem.s.offCurInstrStart = 0;
1356 pVCpu->iem.s.cbInstrBuf = 0;
1357 pVCpu->iem.s.cbInstrBufTotal = 0;
1358 }
1359 }
1360 else
1361 {
1362 pVCpu->iem.s.offInstrNextByte = 0;
1363 pVCpu->iem.s.offCurInstrStart = 0;
1364 pVCpu->iem.s.cbInstrBuf = 0;
1365 pVCpu->iem.s.cbInstrBufTotal = 0;
1366 }
1367#else
1368 pVCpu->iem.s.cbOpcode = 0;
1369 pVCpu->iem.s.offOpcode = 0;
1370#endif
1371 pVCpu->iem.s.offModRm = 0;
1372 Assert(pVCpu->iem.s.cActiveMappings == 0);
1373 pVCpu->iem.s.iNextMapping = 0;
1374 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1375 Assert(pVCpu->iem.s.fBypassHandlers == false);
1376#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1377 if (!pVCpu->iem.s.fInPatchCode)
1378 { /* likely */ }
1379 else
1380 {
1381 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1382 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1383 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1384 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1385 if (!pVCpu->iem.s.fInPatchCode)
1386 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1387 }
1388#endif
1389
1390#ifdef DBGFTRACE_ENABLED
1391 switch (enmMode)
1392 {
1393 case IEMMODE_64BIT:
1394 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1395 break;
1396 case IEMMODE_32BIT:
1397 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1398 break;
1399 case IEMMODE_16BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1401 break;
1402 }
1403#endif
1404}
1405
1406
1407
1408/**
1409 * Prefetch opcodes the first time when starting execution.
1410 *
1411 * @returns Strict VBox status code.
1412 * @param pVCpu The cross context virtual CPU structure of the
1413 * calling thread.
1414 * @param fBypassHandlers Whether to bypass access handlers.
1415 */
1416IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1417{
1418 iemInitDecoder(pVCpu, fBypassHandlers);
1419
1420#ifdef IEM_WITH_CODE_TLB
1421 /** @todo Do ITLB lookup here. */
1422
1423#else /* !IEM_WITH_CODE_TLB */
1424
1425 /*
1426 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1427 *
1428 * First translate CS:rIP to a physical address.
1429 */
1430 uint32_t cbToTryRead;
1431 RTGCPTR GCPtrPC;
1432 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1433 {
1434 cbToTryRead = PAGE_SIZE;
1435 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1436 if (IEM_IS_CANONICAL(GCPtrPC))
1437 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1438 else
1439 return iemRaiseGeneralProtectionFault0(pVCpu);
1440 }
1441 else
1442 {
1443 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1444 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1445 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1446 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1447 else
1448 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1449 if (cbToTryRead) { /* likely */ }
1450 else /* overflowed */
1451 {
1452 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1453 cbToTryRead = UINT32_MAX;
1454 }
1455 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1456 Assert(GCPtrPC <= UINT32_MAX);
1457 }
1458
1459# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1460 /* Allow interpretation of patch manager code blocks since they can for
1461 instance throw #PFs for perfectly good reasons. */
1462 if (pVCpu->iem.s.fInPatchCode)
1463 {
1464 size_t cbRead = 0;
1465 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1466 AssertRCReturn(rc, rc);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1468 return VINF_SUCCESS;
1469 }
1470# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1471
1472 RTGCPHYS GCPhys;
1473 uint64_t fFlags;
1474 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1475 if (RT_SUCCESS(rc)) { /* probable */ }
1476 else
1477 {
1478 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1479 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1480 }
1481 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1485 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1486 }
1487 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1491 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1492 }
1493 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1494 /** @todo Check reserved bits and such stuff. PGM is better at doing
1495 * that, so do it when implementing the guest virtual address
1496 * TLB... */
1497
1498 /*
1499 * Read the bytes at this address.
1500 */
1501 PVM pVM = pVCpu->CTX_SUFF(pVM);
1502# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1503 size_t cbActual;
1504 if ( PATMIsEnabled(pVM)
1505 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1506 {
1507 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1508 Assert(cbActual > 0);
1509 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1510 }
1511 else
1512# endif
1513 {
1514 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1515 if (cbToTryRead > cbLeftOnPage)
1516 cbToTryRead = cbLeftOnPage;
1517 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1518 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1519
1520 if (!pVCpu->iem.s.fBypassHandlers)
1521 {
1522 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1523 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1524 { /* likely */ }
1525 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1526 {
1527 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1528                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1529 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1530 }
1531 else
1532 {
1533 Log((RT_SUCCESS(rcStrict)
1534 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1535 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1536                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1537 return rcStrict;
1538 }
1539 }
1540 else
1541 {
1542 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1543 if (RT_SUCCESS(rc))
1544 { /* likely */ }
1545 else
1546 {
1547 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1548                  GCPtrPC, GCPhys, cbToTryRead, rc));
1549 return rc;
1550 }
1551 }
1552 pVCpu->iem.s.cbOpcode = cbToTryRead;
1553 }
1554#endif /* !IEM_WITH_CODE_TLB */
1555 return VINF_SUCCESS;
1556}
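/*
 * Worked example (illustrative only, the numbers are made up): in 32-bit
 * protected mode with cs.u64Base = 0x10000 and eip = 0x2345, the code above
 * forms the linear address GCPtrPC = 0x12345, asks PGMGstGetPage for the
 * guest physical page backing it, and then re-attaches the page offset via
 * GCPhys |= GCPtrPC & PAGE_OFFSET_MASK.  The first opcode bytes are then read
 * from that physical address, limited to what is left of the page and to
 * sizeof(abOpcode).
 */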
1557
1558
1559/**
1560 * Invalidates the IEM TLBs.
1561 *
1562 * This is called internally as well as by PGM when moving GC mappings.
1563 *
1565 * @param pVCpu The cross context virtual CPU structure of the calling
1566 * thread.
1567 * @param fVmm Set when PGM calls us with a remapping.
1568 */
1569VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1570{
1571#ifdef IEM_WITH_CODE_TLB
1572 pVCpu->iem.s.cbInstrBufTotal = 0;
1573 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1574 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1575 { /* very likely */ }
1576 else
1577 {
1578 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1579 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1580 while (i-- > 0)
1581 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1582 }
1583#endif
1584
1585#ifdef IEM_WITH_DATA_TLB
1586 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1587 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1588 { /* very likely */ }
1589 else
1590 {
1591 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1592 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1593 while (i-- > 0)
1594 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1595 }
1596#endif
1597 NOREF(pVCpu); NOREF(fVmm);
1598}
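/*
 * Sketch of the revision trick used above (illustrative, not part of the
 * build): an entry only counts as a hit while its uTag carries the current
 * uTlbRevision, so bumping the revision invalidates all 256 entries in O(1)
 * without touching the array.  A lookup therefore does something like:
 *
 *     uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)(GCPtr >> X86_PAGE_SHIFT)];
 *     bool const     fHit  = pTlbe->uTag == uTag;
 *
 * (compare the code TLB lookup in iemOpcodeFetchBytesJmp below).  Only when
 * the 64-bit revision counter wraps to zero do the tags themselves need to be
 * scrubbed, which is the unlikely branch handled above.
 */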
1599
1600
1601/**
1602 * Invalidates a page in the TLBs.
1603 *
1604 * @param pVCpu The cross context virtual CPU structure of the calling
1605 * thread.
1606 * @param GCPtr The address of the page to invalidate
1607 */
1608VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1609{
1610#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1611 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1612 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1613 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1614 uintptr_t idx = (uint8_t)GCPtr;
1615
1616# ifdef IEM_WITH_CODE_TLB
1617 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1618 {
1619 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1620 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1621 pVCpu->iem.s.cbInstrBufTotal = 0;
1622 }
1623# endif
1624
1625# ifdef IEM_WITH_DATA_TLB
1626 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1627 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1628# endif
1629#else
1630 NOREF(pVCpu); NOREF(GCPtr);
1631#endif
1632}
1633
1634
1635/**
1636 * Invalidates the host physical aspects of the IEM TLBs.
1637 *
1638 * This is called internally as well as by PGM when moving GC mappings.
1639 *
1640 * @param pVCpu The cross context virtual CPU structure of the calling
1641 * thread.
1642 */
1643VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1644{
1645#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1646 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1647
1648# ifdef IEM_WITH_CODE_TLB
1649 pVCpu->iem.s.cbInstrBufTotal = 0;
1650# endif
1651 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1652 if (uTlbPhysRev != 0)
1653 {
1654 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1655 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1656 }
1657 else
1658 {
1659 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1660 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1661
1662 unsigned i;
1663# ifdef IEM_WITH_CODE_TLB
1664 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1665 while (i-- > 0)
1666 {
1667 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1668 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1669 }
1670# endif
1671# ifdef IEM_WITH_DATA_TLB
1672 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1673 while (i-- > 0)
1674 {
1675 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1676 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1677 }
1678# endif
1679 }
1680#else
1681 NOREF(pVCpu);
1682#endif
1683}
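/*
 * Note on the physical revision (illustrative): IEMTLBE_F_PHYS_REV is part of
 * fFlagsAndPhysRev, so an entry's host mapping is only trusted while
 *
 *     (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev
 *
 * holds (see iemOpcodeFetchBytesJmp below).  Bumping uTlbPhysRev here thus
 * forces every entry to redo the PGMPhysIemGCPhys2PtrNoLock lookup on its next
 * use, which is why only the wrap-around case has to walk the arrays.
 */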
1684
1685
1686/**
1687 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1688 *
1689 * This is called internally as well as by PGM when moving GC mappings.
1690 *
1691 * @param pVM The cross context VM structure.
1692 *
1693 * @remarks Caller holds the PGM lock.
1694 */
1695VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1696{
1697 RT_NOREF_PV(pVM);
1698}
1699
1700#ifdef IEM_WITH_CODE_TLB
1701
1702/**
1703 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1704 * failure and longjmp'ing.
1705 *
1706 * We end up here for a number of reasons:
1707 * - pbInstrBuf isn't yet initialized.
1708 * - Advancing beyond the buffer boundary (e.g. cross page).
1709 * - Advancing beyond the CS segment limit.
1710 * - Fetching from non-mappable page (e.g. MMIO).
1711 *
1712 * @param pVCpu The cross context virtual CPU structure of the
1713 * calling thread.
1714 * @param pvDst Where to return the bytes.
1715 * @param cbDst Number of bytes to read.
1716 *
1717 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1718 */
1719IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1720{
1721#ifdef IN_RING3
1722 for (;;)
1723 {
1724 Assert(cbDst <= 8);
1725 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1726
1727 /*
1728 * We might have a partial buffer match, deal with that first to make the
1729 * rest simpler. This is the first part of the cross page/buffer case.
1730 */
1731 if (pVCpu->iem.s.pbInstrBuf != NULL)
1732 {
1733 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1734 {
1735 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1736 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1737 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1738
1739 cbDst -= cbCopy;
1740 pvDst = (uint8_t *)pvDst + cbCopy;
1741 offBuf += cbCopy;
1742 pVCpu->iem.s.offInstrNextByte += cbCopy;
1743 }
1744 }
1745
1746 /*
1747 * Check segment limit, figuring how much we're allowed to access at this point.
1748 *
1749 * We will fault immediately if RIP is past the segment limit / in non-canonical
1750 * territory. If we do continue, there are one or more bytes to read before we
1751 * end up in trouble and we need to do that first before faulting.
1752 */
1753 RTGCPTR GCPtrFirst;
1754 uint32_t cbMaxRead;
1755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1756 {
1757 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1758 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1759 { /* likely */ }
1760 else
1761 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1762 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1763 }
1764 else
1765 {
1766 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1767 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1768 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1769 { /* likely */ }
1770 else
1771 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1772 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1773 if (cbMaxRead != 0)
1774 { /* likely */ }
1775 else
1776 {
1777 /* Overflowed because address is 0 and limit is max. */
1778 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1779 cbMaxRead = X86_PAGE_SIZE;
1780 }
1781 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1782 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1783 if (cbMaxRead2 < cbMaxRead)
1784 cbMaxRead = cbMaxRead2;
1785 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1786 }
1787
1788 /*
1789 * Get the TLB entry for this piece of code.
1790 */
1791 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1792 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1793 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1794 if (pTlbe->uTag == uTag)
1795 {
1796 /* likely when executing lots of code, otherwise unlikely */
1797# ifdef VBOX_WITH_STATISTICS
1798 pVCpu->iem.s.CodeTlb.cTlbHits++;
1799# endif
1800 }
1801 else
1802 {
1803 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1804# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1805 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1806 {
1807 pTlbe->uTag = uTag;
1808 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1809 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1810 pTlbe->GCPhys = NIL_RTGCPHYS;
1811 pTlbe->pbMappingR3 = NULL;
1812 }
1813 else
1814# endif
1815 {
1816 RTGCPHYS GCPhys;
1817 uint64_t fFlags;
1818 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1819 if (RT_FAILURE(rc))
1820 {
1821 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1822 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1823 }
1824
1825 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1826 pTlbe->uTag = uTag;
1827 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1828 pTlbe->GCPhys = GCPhys;
1829 pTlbe->pbMappingR3 = NULL;
1830 }
1831 }
1832
1833 /*
1834 * Check TLB page table level access flags.
1835 */
1836 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1837 {
1838 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1839 {
1840 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1841 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1842 }
1843 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1844 {
1845 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1846 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1847 }
1848 }
1849
1850# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1851 /*
1852 * Allow interpretation of patch manager code blocks since they can for
1853 * instance throw #PFs for perfectly good reasons.
1854 */
1855 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1856 { /* not unlikely */ }
1857 else
1858 {
1859 /** @todo This could be optimized a little in ring-3 if we liked. */
1860 size_t cbRead = 0;
1861 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1862 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1863 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1864 return;
1865 }
1866# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1867
1868 /*
1869 * Look up the physical page info if necessary.
1870 */
1871 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1872 { /* not necessary */ }
1873 else
1874 {
1875 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1876 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1877 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1878 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1879 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1880 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1881 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1882 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1883 }
1884
1885# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1886 /*
1887 * Try to do a direct read using the pbMappingR3 pointer.
1888 */
1889 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1890 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1891 {
1892 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1893 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1894 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1895 {
1896 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1897 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1898 }
1899 else
1900 {
1901 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1902 Assert(cbInstr < cbMaxRead);
1903 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1904 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1905 }
1906 if (cbDst <= cbMaxRead)
1907 {
1908 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1909 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1910 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1911 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1912 return;
1913 }
1914 pVCpu->iem.s.pbInstrBuf = NULL;
1915
1916 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1917 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1918 }
1919 else
1920# endif
1921#if 0
1922 /*
1923 * If there is no special read handling, we can read a bit more and
1924 * put it in the prefetch buffer.
1925 */
1926 if ( cbDst < cbMaxRead
1927 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1928 {
1929 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1930 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1931 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1932 { /* likely */ }
1933 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1934 {
1935 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1936 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1937 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1938 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1939 }
1940 else
1941 {
1942 Log((RT_SUCCESS(rcStrict)
1943 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1944 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1945 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1947 }
1948 }
1949 /*
1950 * Special read handling, so only read exactly what's needed.
1951 * This is a highly unlikely scenario.
1952 */
1953 else
1954#endif
1955 {
1956 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1957 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1958 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1959 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1960 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1961 { /* likely */ }
1962 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1963 {
1964 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1965                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1966 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1967 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1968 }
1969 else
1970 {
1971 Log((RT_SUCCESS(rcStrict)
1972 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1973 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1974                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1975 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1976 }
1977 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1978 if (cbToRead == cbDst)
1979 return;
1980 }
1981
1982 /*
1983 * More to read, loop.
1984 */
1985 cbDst -= cbMaxRead;
1986 pvDst = (uint8_t *)pvDst + cbMaxRead;
1987 }
1988#else
1989 RT_NOREF(pvDst, cbDst);
1990 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1991#endif
1992}
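/*
 * Note (summary of the fast path set up above): once pbInstrBuf, cbInstrBuf and
 * cbInstrBufTotal describe the current page, the inline fetchers (e.g.
 * iemOpcodeGetNextU8Jmp below) read straight from the ring-3 mapping and only
 * come back here when the buffer runs dry, the page boundary or CS limit is
 * crossed, or the page has no direct mapping (e.g. MMIO).
 */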
1993
1994#else
1995
1996/**
1997 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1998 * exception if it fails.
1999 *
2000 * @returns Strict VBox status code.
2001 * @param pVCpu The cross context virtual CPU structure of the
2002 * calling thread.
2003 * @param cbMin The minimum number of bytes relative to offOpcode
2004 * that must be read.
2005 */
2006IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2007{
2008 /*
2009 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2010 *
2011 * First translate CS:rIP to a physical address.
2012 */
2013 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2014 uint32_t cbToTryRead;
2015 RTGCPTR GCPtrNext;
2016 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2017 {
2018 cbToTryRead = PAGE_SIZE;
2019 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2020 if (!IEM_IS_CANONICAL(GCPtrNext))
2021 return iemRaiseGeneralProtectionFault0(pVCpu);
2022 }
2023 else
2024 {
2025 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2026 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2027 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2028 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2029 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2030 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2031 if (!cbToTryRead) /* overflowed */
2032 {
2033 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2034 cbToTryRead = UINT32_MAX;
2035 /** @todo check out wrapping around the code segment. */
2036 }
2037 if (cbToTryRead < cbMin - cbLeft)
2038 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2039 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2040 }
2041
2042 /* Only read up to the end of the page, and make sure we don't read more
2043 than the opcode buffer can hold. */
2044 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2045 if (cbToTryRead > cbLeftOnPage)
2046 cbToTryRead = cbLeftOnPage;
2047 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2048 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2049/** @todo r=bird: Convert assertion into undefined opcode exception? */
2050 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2051
2052# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2053 /* Allow interpretation of patch manager code blocks since they can for
2054 instance throw #PFs for perfectly good reasons. */
2055 if (pVCpu->iem.s.fInPatchCode)
2056 {
2057 size_t cbRead = 0;
2058 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2059 AssertRCReturn(rc, rc);
2060 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2061 return VINF_SUCCESS;
2062 }
2063# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2064
2065 RTGCPHYS GCPhys;
2066 uint64_t fFlags;
2067 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2068 if (RT_FAILURE(rc))
2069 {
2070 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2071 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2072 }
2073 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2074 {
2075 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2076 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2077 }
2078 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2079 {
2080 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2081 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2082 }
2083 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2084 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2085 /** @todo Check reserved bits and such stuff. PGM is better at doing
2086 * that, so do it when implementing the guest virtual address
2087 * TLB... */
2088
2089 /*
2090 * Read the bytes at this address.
2091 *
2092 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2093 * and since PATM should only patch the start of an instruction there
2094 * should be no need to check again here.
2095 */
2096 if (!pVCpu->iem.s.fBypassHandlers)
2097 {
2098 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2099 cbToTryRead, PGMACCESSORIGIN_IEM);
2100 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2101 { /* likely */ }
2102 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2103 {
2104 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2105                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2106 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2107 }
2108 else
2109 {
2110 Log((RT_SUCCESS(rcStrict)
2111 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2112 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2113                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2114 return rcStrict;
2115 }
2116 }
2117 else
2118 {
2119 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2120 if (RT_SUCCESS(rc))
2121 { /* likely */ }
2122 else
2123 {
2124 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2125 return rc;
2126 }
2127 }
2128 pVCpu->iem.s.cbOpcode += cbToTryRead;
2129 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2130
2131 return VINF_SUCCESS;
2132}
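/*
 * Worked example (illustrative, made-up numbers): if the initial prefetch only
 * got 3 bytes because CS:rIP sat 3 bytes short of a page boundary and the
 * decoder then needs a 4-byte immediate, iemOpcodeGetNextU32Slow lands here
 * with cbMin = 4.  The code above translates the next linear address (rIP +
 * cbOpcode), appends up to a page's worth of bytes at &abOpcode[cbOpcode] and
 * bumps cbOpcode, so the inline fetchers can continue where they left off.
 */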
2133
2134#endif /* !IEM_WITH_CODE_TLB */
2135#ifndef IEM_WITH_SETJMP
2136
2137/**
2138 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2139 *
2140 * @returns Strict VBox status code.
2141 * @param pVCpu The cross context virtual CPU structure of the
2142 * calling thread.
2143 * @param pb Where to return the opcode byte.
2144 */
2145DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2146{
2147 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2148 if (rcStrict == VINF_SUCCESS)
2149 {
2150 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2151 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2152 pVCpu->iem.s.offOpcode = offOpcode + 1;
2153 }
2154 else
2155 *pb = 0;
2156 return rcStrict;
2157}
2158
2159
2160/**
2161 * Fetches the next opcode byte.
2162 *
2163 * @returns Strict VBox status code.
2164 * @param pVCpu The cross context virtual CPU structure of the
2165 * calling thread.
2166 * @param pu8 Where to return the opcode byte.
2167 */
2168DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2169{
2170 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2171 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2172 {
2173 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2174 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2175 return VINF_SUCCESS;
2176 }
2177 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2178}
2179
2180#else /* IEM_WITH_SETJMP */
2181
2182/**
2183 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2184 *
2185 * @returns The opcode byte.
2186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2187 */
2188DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2189{
2190# ifdef IEM_WITH_CODE_TLB
2191 uint8_t u8;
2192 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2193 return u8;
2194# else
2195 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2196 if (rcStrict == VINF_SUCCESS)
2197 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2198 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2199# endif
2200}
2201
2202
2203/**
2204 * Fetches the next opcode byte, longjmp on error.
2205 *
2206 * @returns The opcode byte.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 */
2209DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2210{
2211# ifdef IEM_WITH_CODE_TLB
2212 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2213 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2214 if (RT_LIKELY( pbBuf != NULL
2215 && offBuf < pVCpu->iem.s.cbInstrBuf))
2216 {
2217 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2218 return pbBuf[offBuf];
2219 }
2220# else
2221 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2222 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2223 {
2224 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2225 return pVCpu->iem.s.abOpcode[offOpcode];
2226 }
2227# endif
2228 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2229}
2230
2231#endif /* IEM_WITH_SETJMP */
2232
2233/**
2234 * Fetches the next opcode byte, returns automatically on failure.
2235 *
2236 * @param a_pu8 Where to return the opcode byte.
2237 * @remark Implicitly references pVCpu.
2238 */
2239#ifndef IEM_WITH_SETJMP
2240# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2241 do \
2242 { \
2243 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2244 if (rcStrict2 == VINF_SUCCESS) \
2245 { /* likely */ } \
2246 else \
2247 return rcStrict2; \
2248 } while (0)
2249#else
2250# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2251#endif /* IEM_WITH_SETJMP */
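/*
 * Usage sketch (hypothetical decoder fragment, not an actual IEM function):
 * the macro lets decoder bodies be written once for both build modes - the
 * status-code build returns rcStrict2 to the caller on a fetch failure, the
 * setjmp build longjmps out of the decoder instead.
 *
 *     // IEM_STATIC VBOXSTRICTRC iemOpSketch_WithImm8(PVMCPU pVCpu)  -- hypothetical name
 *     // {
 *     //     uint8_t bImm;
 *     //     IEM_OPCODE_GET_NEXT_U8(&bImm);  // returns or longjmps on failure
 *     //     ... use bImm ...
 *     //     return VINF_SUCCESS;
 *     // }
 */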
2252
2253
2254#ifndef IEM_WITH_SETJMP
2255/**
2256 * Fetches the next signed byte from the opcode stream.
2257 *
2258 * @returns Strict VBox status code.
2259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2260 * @param pi8 Where to return the signed byte.
2261 */
2262DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2263{
2264 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2265}
2266#endif /* !IEM_WITH_SETJMP */
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, returning automatically
2271 * on failure.
2272 *
2273 * @param a_pi8 Where to return the signed byte.
2274 * @remark Implicitly references pVCpu.
2275 */
2276#ifndef IEM_WITH_SETJMP
2277# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2278 do \
2279 { \
2280 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2281 if (rcStrict2 != VINF_SUCCESS) \
2282 return rcStrict2; \
2283 } while (0)
2284#else /* IEM_WITH_SETJMP */
2285# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2286
2287#endif /* IEM_WITH_SETJMP */
2288
2289#ifndef IEM_WITH_SETJMP
2290
2291/**
2292 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2293 *
2294 * @returns Strict VBox status code.
2295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2296 * @param pu16 Where to return the opcode word.
2297 */
2298DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2299{
2300 uint8_t u8;
2301 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2302 if (rcStrict == VINF_SUCCESS)
2303 *pu16 = (int8_t)u8;
2304 return rcStrict;
2305}
2306
2307
2308/**
2309 * Fetches the next signed byte from the opcode stream, extending it to
2310 * unsigned 16-bit.
2311 *
2312 * @returns Strict VBox status code.
2313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2314 * @param pu16 Where to return the unsigned word.
2315 */
2316DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2317{
2318 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2319 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2320 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2321
2322 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2323 pVCpu->iem.s.offOpcode = offOpcode + 1;
2324 return VINF_SUCCESS;
2325}
2326
2327#endif /* !IEM_WITH_SETJMP */
2328
2329/**
2330 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2331 * word, and returns automatically on failure.
2332 *
2333 * @param a_pu16 Where to return the word.
2334 * @remark Implicitly references pVCpu.
2335 */
2336#ifndef IEM_WITH_SETJMP
2337# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2338 do \
2339 { \
2340 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2341 if (rcStrict2 != VINF_SUCCESS) \
2342 return rcStrict2; \
2343 } while (0)
2344#else
2345# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2346#endif
2347
2348#ifndef IEM_WITH_SETJMP
2349
2350/**
2351 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2352 *
2353 * @returns Strict VBox status code.
2354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2355 * @param pu32 Where to return the opcode dword.
2356 */
2357DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2358{
2359 uint8_t u8;
2360 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2361 if (rcStrict == VINF_SUCCESS)
2362 *pu32 = (int8_t)u8;
2363 return rcStrict;
2364}
2365
2366
2367/**
2368 * Fetches the next signed byte from the opcode stream, extending it to
2369 * unsigned 32-bit.
2370 *
2371 * @returns Strict VBox status code.
2372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2373 * @param pu32 Where to return the unsigned dword.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2376{
2377 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2379 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2380
2381 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2382 pVCpu->iem.s.offOpcode = offOpcode + 1;
2383 return VINF_SUCCESS;
2384}
2385
2386#endif /* !IEM_WITH_SETJMP */
2387
2388/**
2389 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2390 * double word, and returns automatically on failure.
2391 *
2392 * @param a_pu32 Where to return the double word.
2393 * @remark Implicitly references pVCpu.
2394 */
2395#ifndef IEM_WITH_SETJMP
2396#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2397 do \
2398 { \
2399 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2400 if (rcStrict2 != VINF_SUCCESS) \
2401 return rcStrict2; \
2402 } while (0)
2403#else
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2405#endif
2406
2407#ifndef IEM_WITH_SETJMP
2408
2409/**
2410 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2411 *
2412 * @returns Strict VBox status code.
2413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2414 * @param pu64 Where to return the opcode qword.
2415 */
2416DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2417{
2418 uint8_t u8;
2419 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2420 if (rcStrict == VINF_SUCCESS)
2421 *pu64 = (int8_t)u8;
2422 return rcStrict;
2423}
2424
2425
2426/**
2427 * Fetches the next signed byte from the opcode stream, extending it to
2428 * unsigned 64-bit.
2429 *
2430 * @returns Strict VBox status code.
2431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2432 * @param pu64 Where to return the unsigned qword.
2433 */
2434DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2435{
2436 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2437 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2438 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2439
2440 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2441 pVCpu->iem.s.offOpcode = offOpcode + 1;
2442 return VINF_SUCCESS;
2443}
2444
2445#endif /* !IEM_WITH_SETJMP */
2446
2447
2448/**
2449 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2450 * quad word, and returns automatically on failure.
2451 *
2452 * @param a_pu64 Where to return the quad word.
2453 * @remark Implicitly references pVCpu.
2454 */
2455#ifndef IEM_WITH_SETJMP
2456# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2457 do \
2458 { \
2459 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2460 if (rcStrict2 != VINF_SUCCESS) \
2461 return rcStrict2; \
2462 } while (0)
2463#else
2464# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2465#endif
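/*
 * Worked example for the S8_SX_* family (illustrative): an opcode byte of 0xFE
 * is the signed value -2, so IEM_OPCODE_GET_NEXT_S8_SX_U16 yields 0xFFFE,
 * IEM_OPCODE_GET_NEXT_S8_SX_U32 yields 0xFFFFFFFE and
 * IEM_OPCODE_GET_NEXT_S8_SX_U64 yields 0xFFFFFFFFFFFFFFFE - exactly what a
 * sign-extended 8-bit displacement or immediate operand needs.
 */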
2466
2467
2468#ifndef IEM_WITH_SETJMP
2469/**
2470 * Fetches the next opcode byte (a ModR/M byte), noting down its position.
2471 *
2472 * @returns Strict VBox status code.
2473 * @param pVCpu The cross context virtual CPU structure of the
2474 * calling thread.
2475 * @param pu8 Where to return the opcode byte.
2476 */
2477DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2478{
2479 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2480 pVCpu->iem.s.offModRm = offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2484 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2485 return VINF_SUCCESS;
2486 }
2487 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2488}
2489#else /* IEM_WITH_SETJMP */
2490/**
2491 * Fetches the next opcode byte (a ModR/M byte), noting down its position, longjmp on error.
2492 *
2493 * @returns The opcode byte.
2494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2495 */
2496DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2497{
2498# ifdef IEM_WITH_CODE_TLB
2499 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2500 pVCpu->iem.s.offModRm = offBuf;
2501 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2502 if (RT_LIKELY( pbBuf != NULL
2503 && offBuf < pVCpu->iem.s.cbInstrBuf))
2504 {
2505 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2506 return pbBuf[offBuf];
2507 }
2508# else
2509 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2510 pVCpu->iem.s.offModRm = offOpcode;
2511 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2512 {
2513 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2514 return pVCpu->iem.s.abOpcode[offOpcode];
2515 }
2516# endif
2517 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2518}
2519#endif /* IEM_WITH_SETJMP */
2520
2521/**
2522 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2523 * on failure.
2524 *
2525 * Will note down the position of the ModR/M byte for VT-x exits.
2526 *
2527 * @param a_pbRm Where to return the RM opcode byte.
2528 * @remark Implicitly references pVCpu.
2529 */
2530#ifndef IEM_WITH_SETJMP
2531# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2532 do \
2533 { \
2534 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2535 if (rcStrict2 == VINF_SUCCESS) \
2536 { /* likely */ } \
2537 else \
2538 return rcStrict2; \
2539 } while (0)
2540#else
2541# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2542#endif /* IEM_WITH_SETJMP */
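/*
 * Usage sketch (hypothetical fragment): an instruction with a ModR/M byte
 * fetches it through this macro so that offModRm records where the byte sits
 * in the instruction stream, and then splits it the usual x86 way:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);
 *     uint8_t const iMod = bRm >> 6;        // mod field (addressing form)
 *     uint8_t const iReg = (bRm >> 3) & 7;  // reg / opcode extension field
 *     uint8_t const iRm  = bRm & 7;         // r/m field (register or memory)
 */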
2543
2544
2545#ifndef IEM_WITH_SETJMP
2546
2547/**
2548 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2549 *
2550 * @returns Strict VBox status code.
2551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2552 * @param pu16 Where to return the opcode word.
2553 */
2554DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2555{
2556 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2557 if (rcStrict == VINF_SUCCESS)
2558 {
2559 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2560# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2561 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2562# else
2563 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2564# endif
2565 pVCpu->iem.s.offOpcode = offOpcode + 2;
2566 }
2567 else
2568 *pu16 = 0;
2569 return rcStrict;
2570}
2571
2572
2573/**
2574 * Fetches the next opcode word.
2575 *
2576 * @returns Strict VBox status code.
2577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2578 * @param pu16 Where to return the opcode word.
2579 */
2580DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2581{
2582 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2583 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2584 {
2585 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2586# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2587 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2588# else
2589 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590# endif
2591 return VINF_SUCCESS;
2592 }
2593 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2594}
2595
2596#else /* IEM_WITH_SETJMP */
2597
2598/**
2599 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2600 *
2601 * @returns The opcode word.
2602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2603 */
2604DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2605{
2606# ifdef IEM_WITH_CODE_TLB
2607 uint16_t u16;
2608 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2609 return u16;
2610# else
2611 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2612 if (rcStrict == VINF_SUCCESS)
2613 {
2614 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2615 pVCpu->iem.s.offOpcode += 2;
2616# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2617 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2618# else
2619 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620# endif
2621 }
2622 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2623# endif
2624}
2625
2626
2627/**
2628 * Fetches the next opcode word, longjmp on error.
2629 *
2630 * @returns The opcode word.
2631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2632 */
2633DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2634{
2635# ifdef IEM_WITH_CODE_TLB
2636 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2637 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2638 if (RT_LIKELY( pbBuf != NULL
2639 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2640 {
2641 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2642# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2643 return *(uint16_t const *)&pbBuf[offBuf];
2644# else
2645 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2646# endif
2647 }
2648# else
2649 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2650 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2651 {
2652 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2653# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2654 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2655# else
2656 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657# endif
2658 }
2659# endif
2660 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2661}
2662
2663#endif /* IEM_WITH_SETJMP */
2664
2665
2666/**
2667 * Fetches the next opcode word, returns automatically on failure.
2668 *
2669 * @param a_pu16 Where to return the opcode word.
2670 * @remark Implicitly references pVCpu.
2671 */
2672#ifndef IEM_WITH_SETJMP
2673# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2674 do \
2675 { \
2676 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2677 if (rcStrict2 != VINF_SUCCESS) \
2678 return rcStrict2; \
2679 } while (0)
2680#else
2681# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2682#endif
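/*
 * Note on byte order (illustrative): opcode immediates are little endian, so
 * the RT_MAKE_U16(abOpcode[off], abOpcode[off + 1]) construction above treats
 * the first byte as the low half; e.g. the byte sequence 0x34 0x12 becomes the
 * word 0x1234, matching the unaligned uint16_t read used when
 * IEM_USE_UNALIGNED_DATA_ACCESS is defined on little-endian hosts.
 */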
2683
2684#ifndef IEM_WITH_SETJMP
2685
2686/**
2687 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2688 *
2689 * @returns Strict VBox status code.
2690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2691 * @param pu32 Where to return the opcode double word.
2692 */
2693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2694{
2695 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2696 if (rcStrict == VINF_SUCCESS)
2697 {
2698 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2699 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2700 pVCpu->iem.s.offOpcode = offOpcode + 2;
2701 }
2702 else
2703 *pu32 = 0;
2704 return rcStrict;
2705}
2706
2707
2708/**
2709 * Fetches the next opcode word, zero extending it to a double word.
2710 *
2711 * @returns Strict VBox status code.
2712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2713 * @param pu32 Where to return the opcode double word.
2714 */
2715DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2716{
2717 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2718 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2719 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2720
2721 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2722 pVCpu->iem.s.offOpcode = offOpcode + 2;
2723 return VINF_SUCCESS;
2724}
2725
2726#endif /* !IEM_WITH_SETJMP */
2727
2728
2729/**
2730 * Fetches the next opcode word and zero extends it to a double word, returns
2731 * automatically on failure.
2732 *
2733 * @param a_pu32 Where to return the opcode double word.
2734 * @remark Implicitly references pVCpu.
2735 */
2736#ifndef IEM_WITH_SETJMP
2737# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2738 do \
2739 { \
2740 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2741 if (rcStrict2 != VINF_SUCCESS) \
2742 return rcStrict2; \
2743 } while (0)
2744#else
2745# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2746#endif
2747
2748#ifndef IEM_WITH_SETJMP
2749
2750/**
2751 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2752 *
2753 * @returns Strict VBox status code.
2754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2755 * @param pu64 Where to return the opcode quad word.
2756 */
2757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2758{
2759 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2760 if (rcStrict == VINF_SUCCESS)
2761 {
2762 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2763 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2764 pVCpu->iem.s.offOpcode = offOpcode + 2;
2765 }
2766 else
2767 *pu64 = 0;
2768 return rcStrict;
2769}
2770
2771
2772/**
2773 * Fetches the next opcode word, zero extending it to a quad word.
2774 *
2775 * @returns Strict VBox status code.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 * @param pu64 Where to return the opcode quad word.
2778 */
2779DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2780{
2781 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2782 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2783 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2784
2785 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2786 pVCpu->iem.s.offOpcode = offOpcode + 2;
2787 return VINF_SUCCESS;
2788}
2789
2790#endif /* !IEM_WITH_SETJMP */
2791
2792/**
2793 * Fetches the next opcode word and zero extends it to a quad word, returns
2794 * automatically on failure.
2795 *
2796 * @param a_pu64 Where to return the opcode quad word.
2797 * @remark Implicitly references pVCpu.
2798 */
2799#ifndef IEM_WITH_SETJMP
2800# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2801 do \
2802 { \
2803 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2804 if (rcStrict2 != VINF_SUCCESS) \
2805 return rcStrict2; \
2806 } while (0)
2807#else
2808# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2809#endif
2810
2811
2812#ifndef IEM_WITH_SETJMP
2813/**
2814 * Fetches the next signed word from the opcode stream.
2815 *
2816 * @returns Strict VBox status code.
2817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2818 * @param pi16 Where to return the signed word.
2819 */
2820DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2821{
2822 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2823}
2824#endif /* !IEM_WITH_SETJMP */
2825
2826
2827/**
2828 * Fetches the next signed word from the opcode stream, returning automatically
2829 * on failure.
2830 *
2831 * @param a_pi16 Where to return the signed word.
2832 * @remark Implicitly references pVCpu.
2833 */
2834#ifndef IEM_WITH_SETJMP
2835# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2836 do \
2837 { \
2838 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2839 if (rcStrict2 != VINF_SUCCESS) \
2840 return rcStrict2; \
2841 } while (0)
2842#else
2843# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2844#endif
2845
2846#ifndef IEM_WITH_SETJMP
2847
2848/**
2849 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2850 *
2851 * @returns Strict VBox status code.
2852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2853 * @param pu32 Where to return the opcode dword.
2854 */
2855DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2856{
2857 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2858 if (rcStrict == VINF_SUCCESS)
2859 {
2860 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2861# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2862 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2863# else
2864 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2865 pVCpu->iem.s.abOpcode[offOpcode + 1],
2866 pVCpu->iem.s.abOpcode[offOpcode + 2],
2867 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2868# endif
2869 pVCpu->iem.s.offOpcode = offOpcode + 4;
2870 }
2871 else
2872 *pu32 = 0;
2873 return rcStrict;
2874}
2875
2876
2877/**
2878 * Fetches the next opcode dword.
2879 *
2880 * @returns Strict VBox status code.
2881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2882 * @param pu32 Where to return the opcode double word.
2883 */
2884DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2885{
2886 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2887 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2888 {
2889 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2890# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2891 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2892# else
2893 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2894 pVCpu->iem.s.abOpcode[offOpcode + 1],
2895 pVCpu->iem.s.abOpcode[offOpcode + 2],
2896 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2897# endif
2898 return VINF_SUCCESS;
2899 }
2900 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2901}
2902
2903#else /* IEM_WITH_SETJMP */
2904
2905/**
2906 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2907 *
2908 * @returns The opcode dword.
2909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2910 */
2911DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2912{
2913# ifdef IEM_WITH_CODE_TLB
2914 uint32_t u32;
2915 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2916 return u32;
2917# else
2918 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2919 if (rcStrict == VINF_SUCCESS)
2920 {
2921 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2922 pVCpu->iem.s.offOpcode = offOpcode + 4;
2923# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2924 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2925# else
2926 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2927 pVCpu->iem.s.abOpcode[offOpcode + 1],
2928 pVCpu->iem.s.abOpcode[offOpcode + 2],
2929 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2930# endif
2931 }
2932 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2933# endif
2934}
2935
2936
2937/**
2938 * Fetches the next opcode dword, longjmp on error.
2939 *
2940 * @returns The opcode dword.
2941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2942 */
2943DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2944{
2945# ifdef IEM_WITH_CODE_TLB
2946 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2947 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2948 if (RT_LIKELY( pbBuf != NULL
2949 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2950 {
2951 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2952# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2953 return *(uint32_t const *)&pbBuf[offBuf];
2954# else
2955 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2956 pbBuf[offBuf + 1],
2957 pbBuf[offBuf + 2],
2958 pbBuf[offBuf + 3]);
2959# endif
2960 }
2961# else
2962 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2963 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2964 {
2965 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2966# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2967 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2968# else
2969 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2970 pVCpu->iem.s.abOpcode[offOpcode + 1],
2971 pVCpu->iem.s.abOpcode[offOpcode + 2],
2972 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2973# endif
2974 }
2975# endif
2976 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2977}
2978
2979#endif /* !IEM_WITH_SETJMP */
2980
2981
2982/**
2983 * Fetches the next opcode dword, returns automatically on failure.
2984 *
2985 * @param a_pu32 Where to return the opcode dword.
2986 * @remark Implicitly references pVCpu.
2987 */
2988#ifndef IEM_WITH_SETJMP
2989# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2990 do \
2991 { \
2992 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2993 if (rcStrict2 != VINF_SUCCESS) \
2994 return rcStrict2; \
2995 } while (0)
2996#else
2997# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2998#endif
2999
3000#ifndef IEM_WITH_SETJMP
3001
3002/**
3003 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
3008 */
3009DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3012 if (rcStrict == VINF_SUCCESS)
3013 {
3014 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3015 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 pVCpu->iem.s.offOpcode = offOpcode + 4;
3020 }
3021 else
3022 *pu64 = 0;
3023 return rcStrict;
3024}
3025
3026
3027/**
3028 * Fetches the next opcode dword, zero extending it to a quad word.
3029 *
3030 * @returns Strict VBox status code.
3031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3032 * @param pu64 Where to return the opcode quad word.
3033 */
3034DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3035{
3036 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3037 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3038 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3039
3040 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3041 pVCpu->iem.s.abOpcode[offOpcode + 1],
3042 pVCpu->iem.s.abOpcode[offOpcode + 2],
3043 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3044 pVCpu->iem.s.offOpcode = offOpcode + 4;
3045 return VINF_SUCCESS;
3046}
3047
3048#endif /* !IEM_WITH_SETJMP */
3049
3050
3051/**
3052 * Fetches the next opcode dword and zero extends it to a quad word, returns
3053 * automatically on failure.
3054 *
3055 * @param a_pu64 Where to return the opcode quad word.
3056 * @remark Implicitly references pVCpu.
3057 */
3058#ifndef IEM_WITH_SETJMP
3059# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3060 do \
3061 { \
3062 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3063 if (rcStrict2 != VINF_SUCCESS) \
3064 return rcStrict2; \
3065 } while (0)
3066#else
3067# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3068#endif
3069
3070
3071#ifndef IEM_WITH_SETJMP
3072/**
3073 * Fetches the next signed double word from the opcode stream.
3074 *
3075 * @returns Strict VBox status code.
3076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3077 * @param pi32 Where to return the signed double word.
3078 */
3079DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3080{
3081 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3082}
3083#endif
3084
3085/**
3086 * Fetches the next signed double word from the opcode stream, returning
3087 * automatically on failure.
3088 *
3089 * @param a_pi32 Where to return the signed double word.
3090 * @remark Implicitly references pVCpu.
3091 */
3092#ifndef IEM_WITH_SETJMP
3093# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3094 do \
3095 { \
3096 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3097 if (rcStrict2 != VINF_SUCCESS) \
3098 return rcStrict2; \
3099 } while (0)
3100#else
3101# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3102#endif
3103
3104#ifndef IEM_WITH_SETJMP
3105
3106/**
3107 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3108 *
3109 * @returns Strict VBox status code.
3110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3111 * @param pu64 Where to return the opcode qword.
3112 */
3113DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3114{
3115 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3116 if (rcStrict == VINF_SUCCESS)
3117 {
3118 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3119 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3120 pVCpu->iem.s.abOpcode[offOpcode + 1],
3121 pVCpu->iem.s.abOpcode[offOpcode + 2],
3122 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3123 pVCpu->iem.s.offOpcode = offOpcode + 4;
3124 }
3125 else
3126 *pu64 = 0;
3127 return rcStrict;
3128}
3129
3130
3131/**
3132 * Fetches the next opcode dword, sign extending it into a quad word.
3133 *
3134 * @returns Strict VBox status code.
3135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3136 * @param pu64 Where to return the opcode quad word.
3137 */
3138DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3139{
3140 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3141 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3142 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3143
3144 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3145 pVCpu->iem.s.abOpcode[offOpcode + 1],
3146 pVCpu->iem.s.abOpcode[offOpcode + 2],
3147 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3148 *pu64 = i32;
3149 pVCpu->iem.s.offOpcode = offOpcode + 4;
3150 return VINF_SUCCESS;
3151}
3152
3153#endif /* !IEM_WITH_SETJMP */
3154
3155
3156/**
3157 * Fetches the next opcode double word and sign extends it to a quad word,
3158 * returns automatically on failure.
3159 *
3160 * @param a_pu64 Where to return the opcode quad word.
3161 * @remark Implicitly references pVCpu.
3162 */
3163#ifndef IEM_WITH_SETJMP
3164# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3165 do \
3166 { \
3167 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3168 if (rcStrict2 != VINF_SUCCESS) \
3169 return rcStrict2; \
3170 } while (0)
3171#else
3172# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3173#endif
3174
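/*
 * Minimal sketch (not part of the build) of why both a zero-extending and a
 * sign-extending dword fetch exist: a 32-bit immediate such as 0x80000000 yields
 * different 64-bit values depending on the extension, which matters for 64-bit
 * instructions whose imm32 operand is architecturally sign extended.
 *
 *     uint32_t const u32 = UINT32_C(0x80000000);
 *     uint64_t const uZx = u32;              // zero extended: 0x0000000080000000
 *     uint64_t const uSx = (int32_t)u32;     // sign extended: 0xffffffff80000000
 */
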
3175#ifndef IEM_WITH_SETJMP
3176
3177/**
3178 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3179 *
3180 * @returns Strict VBox status code.
3181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3182 * @param pu64 Where to return the opcode qword.
3183 */
3184DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3185{
3186 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3187 if (rcStrict == VINF_SUCCESS)
3188 {
3189 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3190# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3191 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3192# else
3193 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3194 pVCpu->iem.s.abOpcode[offOpcode + 1],
3195 pVCpu->iem.s.abOpcode[offOpcode + 2],
3196 pVCpu->iem.s.abOpcode[offOpcode + 3],
3197 pVCpu->iem.s.abOpcode[offOpcode + 4],
3198 pVCpu->iem.s.abOpcode[offOpcode + 5],
3199 pVCpu->iem.s.abOpcode[offOpcode + 6],
3200 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3201# endif
3202 pVCpu->iem.s.offOpcode = offOpcode + 8;
3203 }
3204 else
3205 *pu64 = 0;
3206 return rcStrict;
3207}
3208
3209
3210/**
3211 * Fetches the next opcode qword.
3212 *
3213 * @returns Strict VBox status code.
3214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3215 * @param pu64 Where to return the opcode qword.
3216 */
3217DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3218{
3219 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3220 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3221 {
3222# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3223 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3224# else
3225 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3226 pVCpu->iem.s.abOpcode[offOpcode + 1],
3227 pVCpu->iem.s.abOpcode[offOpcode + 2],
3228 pVCpu->iem.s.abOpcode[offOpcode + 3],
3229 pVCpu->iem.s.abOpcode[offOpcode + 4],
3230 pVCpu->iem.s.abOpcode[offOpcode + 5],
3231 pVCpu->iem.s.abOpcode[offOpcode + 6],
3232 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3233# endif
3234 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3235 return VINF_SUCCESS;
3236 }
3237 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3238}
3239
3240#else /* IEM_WITH_SETJMP */
3241
3242/**
3243 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3244 *
3245 * @returns The opcode qword.
3246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3247 */
3248DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3249{
3250# ifdef IEM_WITH_CODE_TLB
3251 uint64_t u64;
3252 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3253 return u64;
3254# else
3255 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3256 if (rcStrict == VINF_SUCCESS)
3257 {
3258 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3259 pVCpu->iem.s.offOpcode = offOpcode + 8;
3260# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3261 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3262# else
3263 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3264 pVCpu->iem.s.abOpcode[offOpcode + 1],
3265 pVCpu->iem.s.abOpcode[offOpcode + 2],
3266 pVCpu->iem.s.abOpcode[offOpcode + 3],
3267 pVCpu->iem.s.abOpcode[offOpcode + 4],
3268 pVCpu->iem.s.abOpcode[offOpcode + 5],
3269 pVCpu->iem.s.abOpcode[offOpcode + 6],
3270 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3271# endif
3272 }
3273 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3274# endif
3275}
3276
3277
3278/**
3279 * Fetches the next opcode qword, longjmp on error.
3280 *
3281 * @returns The opcode qword.
3282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3283 */
3284DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3285{
3286# ifdef IEM_WITH_CODE_TLB
3287 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3288 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3289 if (RT_LIKELY( pbBuf != NULL
3290 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3291 {
3292 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3293# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3294 return *(uint64_t const *)&pbBuf[offBuf];
3295# else
3296 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3297 pbBuf[offBuf + 1],
3298 pbBuf[offBuf + 2],
3299 pbBuf[offBuf + 3],
3300 pbBuf[offBuf + 4],
3301 pbBuf[offBuf + 5],
3302 pbBuf[offBuf + 6],
3303 pbBuf[offBuf + 7]);
3304# endif
3305 }
3306# else
3307 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3308 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3309 {
3310 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3311# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3312 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3313# else
3314 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3315 pVCpu->iem.s.abOpcode[offOpcode + 1],
3316 pVCpu->iem.s.abOpcode[offOpcode + 2],
3317 pVCpu->iem.s.abOpcode[offOpcode + 3],
3318 pVCpu->iem.s.abOpcode[offOpcode + 4],
3319 pVCpu->iem.s.abOpcode[offOpcode + 5],
3320 pVCpu->iem.s.abOpcode[offOpcode + 6],
3321 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3322# endif
3323 }
3324# endif
3325 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3326}
3327
3328#endif /* IEM_WITH_SETJMP */
3329
3330/**
3331 * Fetches the next opcode quad word, returns automatically on failure.
3332 *
3333 * @param a_pu64 Where to return the opcode quad word.
3334 * @remark Implicitly references pVCpu.
3335 */
3336#ifndef IEM_WITH_SETJMP
3337# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3338 do \
3339 { \
3340 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3341 if (rcStrict2 != VINF_SUCCESS) \
3342 return rcStrict2; \
3343 } while (0)
3344#else
3345# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3346#endif
3347
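/*
 * Illustrative sketch (not part of the build): the IEM_USE_UNALIGNED_DATA_ACCESS
 * path and the RT_MAKE_U64_FROM_U8 path above assemble the same value, since the
 * byte-wise variant builds the integer from the lowest-addressed byte up and the
 * unaligned path is only enabled on little-endian (x86/AMD64) hosts.
 *
 *     uint8_t const abBytes[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
 *     uint64_t      u64Direct;
 *     memcpy(&u64Direct, &abBytes[0], sizeof(u64Direct));    // 0x0807060504030201 here
 *     Assert(u64Direct == RT_MAKE_U64_FROM_U8(abBytes[0], abBytes[1], abBytes[2], abBytes[3],
 *                                             abBytes[4], abBytes[5], abBytes[6], abBytes[7]));
 */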
3348
3349/** @name Misc Worker Functions.
3350 * @{
3351 */
3352
3353/**
3354 * Gets the exception class for the specified exception vector.
3355 *
3356 * @returns The class of the specified exception.
3357 * @param uVector The exception vector.
3358 */
3359IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3360{
3361 Assert(uVector <= X86_XCPT_LAST);
3362 switch (uVector)
3363 {
3364 case X86_XCPT_DE:
3365 case X86_XCPT_TS:
3366 case X86_XCPT_NP:
3367 case X86_XCPT_SS:
3368 case X86_XCPT_GP:
3369 case X86_XCPT_SX: /* AMD only */
3370 return IEMXCPTCLASS_CONTRIBUTORY;
3371
3372 case X86_XCPT_PF:
3373 case X86_XCPT_VE: /* Intel only */
3374 return IEMXCPTCLASS_PAGE_FAULT;
3375
3376 case X86_XCPT_DF:
3377 return IEMXCPTCLASS_DOUBLE_FAULT;
3378 }
3379 return IEMXCPTCLASS_BENIGN;
3380}
3381
3382
3383/**
3384 * Evaluates how to handle an exception caused during delivery of another event
3385 * (exception / interrupt).
3386 *
3387 * @returns How to handle the recursive exception.
3388 * @param pVCpu The cross context virtual CPU structure of the
3389 * calling thread.
3390 * @param fPrevFlags The flags of the previous event.
3391 * @param uPrevVector The vector of the previous event.
3392 * @param fCurFlags The flags of the current exception.
3393 * @param uCurVector The vector of the current exception.
3394 * @param pfXcptRaiseInfo Where to store additional information about the
3395 * exception condition. Optional.
3396 */
3397VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3398 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3399{
3400 /*
3401 * Only CPU exceptions can be raised while delivering other events; software interrupt
3402 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3403 */
3404 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3405 Assert(pVCpu); RT_NOREF(pVCpu);
3406 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3407
3408 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3409 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3410 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3411 {
3412 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3413 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3414 {
3415 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3416 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3417 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3418 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3419 {
3420 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3421 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3422 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3423 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3424 uCurVector, pVCpu->cpum.GstCtx.cr2));
3425 }
3426 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3427 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3428 {
3429 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3430 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3431 }
3432 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3433 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3434 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3435 {
3436 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3437 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3438 }
3439 }
3440 else
3441 {
3442 if (uPrevVector == X86_XCPT_NMI)
3443 {
3444 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3445 if (uCurVector == X86_XCPT_PF)
3446 {
3447 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3448 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3449 }
3450 }
3451 else if ( uPrevVector == X86_XCPT_AC
3452 && uCurVector == X86_XCPT_AC)
3453 {
3454 enmRaise = IEMXCPTRAISE_CPU_HANG;
3455 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3456 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3457 }
3458 }
3459 }
3460 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3461 {
3462 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3463 if (uCurVector == X86_XCPT_PF)
3464 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3465 }
3466 else
3467 {
3468 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3469 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3470 }
3471
3472 if (pfXcptRaiseInfo)
3473 *pfXcptRaiseInfo = fRaiseInfo;
3474 return enmRaise;
3475}
3476
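/*
 * Usage sketch (not part of the build): a #GP raised while delivering a #PF is a
 * page-fault class event followed by a contributory one, which the function above
 * folds into a double fault.  The flag combinations below are roughly what the
 * IEM exception dispatcher would pass for these two vectors.
 *
 *     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 *                                                      X86_XCPT_PF,  // previous event
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                                                      X86_XCPT_GP,  // current exception
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo & IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */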
3477
3478/**
3479 * Enters the CPU shutdown state initiated by a triple fault or other
3480 * unrecoverable conditions.
3481 *
3482 * @returns Strict VBox status code.
3483 * @param pVCpu The cross context virtual CPU structure of the
3484 * calling thread.
3485 */
3486IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3487{
3488 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3489 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3490
3491 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3492 {
3493 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3494 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3495 }
3496
3497 RT_NOREF(pVCpu);
3498 return VINF_EM_TRIPLE_FAULT;
3499}
3500
3501
3502/**
3503 * Validates a new SS segment.
3504 *
3505 * @returns VBox strict status code.
3506 * @param pVCpu The cross context virtual CPU structure of the
3507 * calling thread.
3508 * @param NewSS The new SS selector.
3509 * @param uCpl The CPL to load the stack for.
3510 * @param pDesc Where to return the descriptor.
3511 */
3512IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3513{
3514 /* Null selectors are not allowed (we're not called for dispatching
3515 interrupts with SS=0 in long mode). */
3516 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3517 {
3518 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3519 return iemRaiseTaskSwitchFault0(pVCpu);
3520 }
3521
3522 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3523 if ((NewSS & X86_SEL_RPL) != uCpl)
3524 {
3525 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3526 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3527 }
3528
3529 /*
3530 * Read the descriptor.
3531 */
3532 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3533 if (rcStrict != VINF_SUCCESS)
3534 return rcStrict;
3535
3536 /*
3537 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3538 */
3539 if (!pDesc->Legacy.Gen.u1DescType)
3540 {
3541 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3542 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3543 }
3544
3545 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3546 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3547 {
3548 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3549 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3550 }
3551 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3552 {
3553 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3554 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3555 }
3556
3557 /* Is it there? */
3558 /** @todo testcase: Is this checked before the canonical / limit check below? */
3559 if (!pDesc->Legacy.Gen.u1Present)
3560 {
3561 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3562 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3563 }
3564
3565 return VINF_SUCCESS;
3566}
3567
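/*
 * Illustrative sketch (not part of the build): a descriptor that satisfies the
 * checks above for uCpl = 3, i.e. a present, writable (non-code) data segment
 * whose DPL equals the CPL, fetched through a non-null selector whose RPL also
 * equals the CPL.
 *
 *     IEMSELDESC Desc;
 *     Desc.Legacy.u              = 0;
 *     Desc.Legacy.Gen.u1DescType = 1;                      // code/data, not system
 *     Desc.Legacy.Gen.u4Type     = X86_SEL_TYPE_RW_ACC;    // read/write data, accessed
 *     Desc.Legacy.Gen.u2Dpl      = 3;                      // must equal uCpl
 *     Desc.Legacy.Gen.u1Present  = 1;
 */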
3568
3569/**
3570 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3571 * not.
3572 *
3573 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3574 */
3575#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3576# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3577#else
3578# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3579#endif
3580
3581/**
3582 * Updates the EFLAGS in the correct manner wrt. PATM.
3583 *
3584 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3585 * @param a_fEfl The new EFLAGS.
3586 */
3587#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3588# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3589#else
3590# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3591#endif
3592
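/*
 * Minimal usage sketch (not part of the build): the two macros are meant for
 * read-modify-write sequences on the guest EFLAGS, e.g. masking interrupts the
 * way CLI would, so the PATM-aware raw-mode variant is picked up automatically.
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */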
3593
3594/** @} */
3595
3596/** @name Raising Exceptions.
3597 *
3598 * @{
3599 */
3600
3601
3602/**
3603 * Loads the specified stack far pointer from the TSS.
3604 *
3605 * @returns VBox strict status code.
3606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3607 * @param uCpl The CPL to load the stack for.
3608 * @param pSelSS Where to return the new stack segment.
3609 * @param puEsp Where to return the new stack pointer.
3610 */
3611IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3612{
3613 VBOXSTRICTRC rcStrict;
3614 Assert(uCpl < 4);
3615
3616 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3617 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3618 {
3619 /*
3620 * 16-bit TSS (X86TSS16).
3621 */
3622 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3623 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3624 {
3625 uint32_t off = uCpl * 4 + 2;
3626 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3627 {
3628 /** @todo check actual access pattern here. */
3629 uint32_t u32Tmp = 0; /* gcc maybe... */
3630 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3631 if (rcStrict == VINF_SUCCESS)
3632 {
3633 *puEsp = RT_LOWORD(u32Tmp);
3634 *pSelSS = RT_HIWORD(u32Tmp);
3635 return VINF_SUCCESS;
3636 }
3637 }
3638 else
3639 {
3640 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3641 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3642 }
3643 break;
3644 }
3645
3646 /*
3647 * 32-bit TSS (X86TSS32).
3648 */
3649 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3650 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3651 {
3652 uint32_t off = uCpl * 8 + 4;
3653 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3654 {
3655 /** @todo check actual access pattern here. */
3656 uint64_t u64Tmp;
3657 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3658 if (rcStrict == VINF_SUCCESS)
3659 {
3660 *puEsp = u64Tmp & UINT32_MAX;
3661 *pSelSS = (RTSEL)(u64Tmp >> 32);
3662 return VINF_SUCCESS;
3663 }
3664 }
3665 else
3666 {
3667 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3668 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3669 }
3670 break;
3671 }
3672
3673 default:
3674 AssertFailed();
3675 rcStrict = VERR_IEM_IPE_4;
3676 break;
3677 }
3678
3679 *puEsp = 0; /* make gcc happy */
3680 *pSelSS = 0; /* make gcc happy */
3681 return rcStrict;
3682}
3683
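/*
 * Illustrative sketch (not part of the build) of where the 'uCpl * 4 + 2' and
 * 'uCpl * 8 + 4' offsets above come from: the privilege-level stack pointers sit
 * right after the 16-bit back link at the start of the TSS, as sp:ss pairs of
 * 2+2 bytes in a 16-bit TSS and as esp (4 bytes) + ss (2 bytes + 2 padding) in a
 * 32-bit TSS.
 *
 *     uint8_t const  uCpl  = 1;
 *     uint32_t const off16 = uCpl * 4 + 2;   // = 6:  the sp1 field (ss1 follows at 8)
 *     uint32_t const off32 = uCpl * 8 + 4;   // = 12: the esp1 field (ss1 follows at 16)
 */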
3684
3685/**
3686 * Loads the specified stack pointer from the 64-bit TSS.
3687 *
3688 * @returns VBox strict status code.
3689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3690 * @param uCpl The CPL to load the stack for.
3691 * @param uIst The interrupt stack table index; 0 means use the uCpl stack.
3692 * @param puRsp Where to return the new stack pointer.
3693 */
3694IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3695{
3696 Assert(uCpl < 4);
3697 Assert(uIst < 8);
3698 *puRsp = 0; /* make gcc happy */
3699
3700 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3701 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3702
3703 uint32_t off;
3704 if (uIst)
3705 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3706 else
3707 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3708 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3709 {
3710 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3711 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3712 }
3713
3714 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3715}
3716
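/*
 * Illustrative sketch (not part of the build): the 64-bit TSS has no ss fields,
 * just RSP0..RSP2 for the privilege levels and IST1..IST7 for the interrupt
 * stack table, all 8 bytes each.  uIst is the 1-based IST number taken from the
 * interrupt gate descriptor.
 *
 *     uint8_t const  uIst    = 3;
 *     uint32_t const offIst3 = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
 *     // = 52 with the architectural layout, where IST1 starts at byte offset 36.
 */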
3717
3718/**
3719 * Adjusts the CPU state according to the exception being raised.
3720 *
3721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3722 * @param u8Vector The exception that has been raised.
3723 */
3724DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3725{
3726 switch (u8Vector)
3727 {
3728 case X86_XCPT_DB:
3729 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3730 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3731 break;
3732 /** @todo Read the AMD and Intel exception reference... */
3733 }
3734}
3735
3736
3737/**
3738 * Implements exceptions and interrupts for real mode.
3739 *
3740 * @returns VBox strict status code.
3741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3742 * @param cbInstr The number of bytes to offset rIP by in the return
3743 * address.
3744 * @param u8Vector The interrupt / exception vector number.
3745 * @param fFlags The flags.
3746 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3747 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3748 */
3749IEM_STATIC VBOXSTRICTRC
3750iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3751 uint8_t cbInstr,
3752 uint8_t u8Vector,
3753 uint32_t fFlags,
3754 uint16_t uErr,
3755 uint64_t uCr2)
3756{
3757 NOREF(uErr); NOREF(uCr2);
3758 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3759
3760 /*
3761 * Read the IDT entry.
3762 */
3763 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3764 {
3765 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3767 }
3768 RTFAR16 Idte;
3769 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3770 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3771 {
3772 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3773 return rcStrict;
3774 }
3775
3776 /*
3777 * Push the stack frame.
3778 */
3779 uint16_t *pu16Frame;
3780 uint64_t uNewRsp;
3781 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3782 if (rcStrict != VINF_SUCCESS)
3783 return rcStrict;
3784
3785 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3786#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3787 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3788 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3789 fEfl |= UINT16_C(0xf000);
3790#endif
3791 pu16Frame[2] = (uint16_t)fEfl;
3792 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3793 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3794 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3795 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3796 return rcStrict;
3797
3798 /*
3799 * Load the vector address into cs:ip and make exception specific state
3800 * adjustments.
3801 */
3802 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3803 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3804 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3805 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3806 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3807 pVCpu->cpum.GstCtx.rip = Idte.off;
3808 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3809 IEMMISC_SET_EFL(pVCpu, fEfl);
3810
3811 /** @todo do we actually do this in real mode? */
3812 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3813 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3814
3815 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3816}
3817
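/*
 * Layout sketch (not part of the build), reusing the locals of the function above
 * for illustration: a real-mode IVT entry is 4 bytes, the 16-bit handler offset
 * followed by the 16-bit segment, which is why a single dword is fetched into an
 * RTFAR16.  The exception frame pushed is 3 words; the return IP ends up at the
 * lowest address (the new SP), then CS, then FLAGS.
 *
 *     uint32_t const offIdte = UINT32_C(4) * u8Vector;   // e.g. vector 0x08 -> IVT offset 0x20
 *     uint16_t const uIp     = Idte.off;                 // bytes 0..1 of the entry
 *     uint16_t const uCs     = Idte.sel;                 // bytes 2..3 of the entry
 */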
3818
3819/**
3820 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3821 *
3822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3823 * @param pSReg Pointer to the segment register.
3824 */
3825IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3826{
3827 pSReg->Sel = 0;
3828 pSReg->ValidSel = 0;
3829 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3830 {
3831 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes. */
3832 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3833 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3834 }
3835 else
3836 {
3837 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3838 /** @todo check this on AMD-V */
3839 pSReg->u64Base = 0;
3840 pSReg->u32Limit = 0;
3841 }
3842}
3843
3844
3845/**
3846 * Loads a segment selector during a task switch in V8086 mode.
3847 *
3848 * @param pSReg Pointer to the segment register.
3849 * @param uSel The selector value to load.
3850 */
3851IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3852{
3853 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3854 pSReg->Sel = uSel;
3855 pSReg->ValidSel = uSel;
3856 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3857 pSReg->u64Base = uSel << 4;
3858 pSReg->u32Limit = 0xffff;
3859 pSReg->Attr.u = 0xf3;
3860}
3861
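/*
 * Minimal sketch (not part of the build) of the v8086 segmentation model the
 * loads above implement: the base is simply the selector shifted left by four,
 * the limit is 64 KiB, and 0xf3 encodes a present, DPL=3, accessed read/write
 * data segment.
 *
 *     uint16_t const uSel    = 0x1234;
 *     uint32_t const uLinear = ((uint32_t)uSel << 4) + 0x0010;   // = 0x12350
 */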
3862
3863/**
3864 * Loads a NULL data selector into a selector register, both the hidden and
3865 * visible parts, in protected mode.
3866 *
3867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3868 * @param pSReg Pointer to the segment register.
3869 * @param uRpl The RPL.
3870 */
3871IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3872{
3873 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3874 * data selector in protected mode. */
3875 pSReg->Sel = uRpl;
3876 pSReg->ValidSel = uRpl;
3877 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3878 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3879 {
3880 /* VT-x (Intel 3960x) observed doing something like this. */
3881 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3882 pSReg->u32Limit = UINT32_MAX;
3883 pSReg->u64Base = 0;
3884 }
3885 else
3886 {
3887 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3888 pSReg->u32Limit = 0;
3889 pSReg->u64Base = 0;
3890 }
3891}
3892
3893
3894/**
3895 * Loads a segment selector during a task switch in protected mode.
3896 *
3897 * In this task switch scenario, we would throw \#TS exceptions rather than
3898 * \#GPs.
3899 *
3900 * @returns VBox strict status code.
3901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3902 * @param pSReg Pointer to the segment register.
3903 * @param uSel The new selector value.
3904 *
3905 * @remarks This does _not_ handle CS or SS.
3906 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3907 */
3908IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3909{
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911
3912 /* Null data selector. */
3913 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3914 {
3915 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3917 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3918 return VINF_SUCCESS;
3919 }
3920
3921 /* Fetch the descriptor. */
3922 IEMSELDESC Desc;
3923 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3924 if (rcStrict != VINF_SUCCESS)
3925 {
3926 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3927 VBOXSTRICTRC_VAL(rcStrict)));
3928 return rcStrict;
3929 }
3930
3931 /* Must be a data segment or readable code segment. */
3932 if ( !Desc.Legacy.Gen.u1DescType
3933 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3934 {
3935 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3936 Desc.Legacy.Gen.u4Type));
3937 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3938 }
3939
3940 /* Check privileges for data segments and non-conforming code segments. */
3941 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3942 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3943 {
3944 /* The RPL and the new CPL must be less than or equal to the DPL. */
3945 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3946 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3947 {
3948 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3949 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3950 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3951 }
3952 }
3953
3954 /* Is it there? */
3955 if (!Desc.Legacy.Gen.u1Present)
3956 {
3957 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3958 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3959 }
3960
3961 /* The base and limit. */
3962 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3963 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3964
3965 /*
3966 * Ok, everything checked out fine. Now set the accessed bit before
3967 * committing the result into the registers.
3968 */
3969 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3970 {
3971 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3972 if (rcStrict != VINF_SUCCESS)
3973 return rcStrict;
3974 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3975 }
3976
3977 /* Commit */
3978 pSReg->Sel = uSel;
3979 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3980 pSReg->u32Limit = cbLimit;
3981 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3982 pSReg->ValidSel = uSel;
3983 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3984 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3985 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3986
3987 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3988 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3989 return VINF_SUCCESS;
3990}
3991
3992
3993/**
3994 * Performs a task switch.
3995 *
3996 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3997 * caller is responsible for performing the necessary checks (like DPL, TSS
3998 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3999 * reference for JMP, CALL, IRET.
4000 *
4001 * If the task switch is due to a software interrupt or hardware exception,
4002 * the caller is responsible for validating the TSS selector and descriptor. See
4003 * Intel Instruction reference for INT n.
4004 *
4005 * @returns VBox strict status code.
4006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4007 * @param enmTaskSwitch The cause of the task switch.
4008 * @param uNextEip The EIP effective after the task switch.
4009 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4010 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4011 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4012 * @param SelTSS The TSS selector of the new task.
4013 * @param pNewDescTSS Pointer to the new TSS descriptor.
4014 */
4015IEM_STATIC VBOXSTRICTRC
4016iemTaskSwitch(PVMCPU pVCpu,
4017 IEMTASKSWITCH enmTaskSwitch,
4018 uint32_t uNextEip,
4019 uint32_t fFlags,
4020 uint16_t uErr,
4021 uint64_t uCr2,
4022 RTSEL SelTSS,
4023 PIEMSELDESC pNewDescTSS)
4024{
4025 Assert(!IEM_IS_REAL_MODE(pVCpu));
4026 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4027 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4028
4029 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4030 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4031 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4032 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4033 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4034
4035 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4036 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4037
4038 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4039 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4040
4041 /* Update CR2 in case it's a page-fault. */
4042 /** @todo This should probably be done much earlier in IEM/PGM. See
4043 * @bugref{5653#c49}. */
4044 if (fFlags & IEM_XCPT_FLAGS_CR2)
4045 pVCpu->cpum.GstCtx.cr2 = uCr2;
4046
4047 /*
4048 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4049 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4050 */
4051 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4052 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4053 if (uNewTSSLimit < uNewTSSLimitMin)
4054 {
4055 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4056 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4057 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4058 }
4059
4060 /*
4061 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4062 * The new TSS must have been read and validated (DPL, limits etc.) before a
4063 * task-switch VM-exit commences.
4064 *
4065 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4066 */
4067 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4068 {
4069 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4070 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4071 }
4072
4073 /*
4074 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4075 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4076 */
4077 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4078 {
4079 uint32_t const uExitInfo1 = SelTSS;
4080 uint32_t uExitInfo2 = uErr;
4081 switch (enmTaskSwitch)
4082 {
4083 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4084 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4085 default: break;
4086 }
4087 if (fFlags & IEM_XCPT_FLAGS_ERR)
4088 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4089 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4090 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4091
4092 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4093 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4094 RT_NOREF2(uExitInfo1, uExitInfo2);
4095 }
4096
4097 /*
4098 * Check the current TSS limit. The last bytes written to the current TSS during the
4099 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4100 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4101 *
4102 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4103 * end up with smaller than "legal" TSS limits.
4104 */
4105 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4106 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4107 if (uCurTSSLimit < uCurTSSLimitMin)
4108 {
4109 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4110 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4111 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4112 }
4113
4114 /*
4115 * Verify that the new TSS can be accessed and map it. Map only the required contents
4116 * and not the entire TSS.
4117 */
4118 void *pvNewTSS;
4119 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4120 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4121 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4122 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4123 * not perform correct translation if this happens. See Intel spec. 7.2.1
4124 * "Task-State Segment" */
4125 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4126 if (rcStrict != VINF_SUCCESS)
4127 {
4128 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4129 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4130 return rcStrict;
4131 }
4132
4133 /*
4134 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4135 */
4136 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4137 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4138 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4139 {
4140 PX86DESC pDescCurTSS;
4141 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4142 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4143 if (rcStrict != VINF_SUCCESS)
4144 {
4145 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4146 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4147 return rcStrict;
4148 }
4149
4150 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4151 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4152 if (rcStrict != VINF_SUCCESS)
4153 {
4154 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4155 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4156 return rcStrict;
4157 }
4158
4159 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4160 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4161 {
4162 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4163 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4164 u32EFlags &= ~X86_EFL_NT;
4165 }
4166 }
4167
4168 /*
4169 * Save the CPU state into the current TSS.
4170 */
4171 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4172 if (GCPtrNewTSS == GCPtrCurTSS)
4173 {
4174 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4175 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4176 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4177 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4178 pVCpu->cpum.GstCtx.ldtr.Sel));
4179 }
4180 if (fIsNewTSS386)
4181 {
4182 /*
4183 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4184 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4185 */
4186 void *pvCurTSS32;
4187 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4188 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4189 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4190 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4191 if (rcStrict != VINF_SUCCESS)
4192 {
4193 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4194 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4195 return rcStrict;
4196 }
4197
4198 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4199 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4200 pCurTSS32->eip = uNextEip;
4201 pCurTSS32->eflags = u32EFlags;
4202 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4203 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4204 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4205 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4206 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4207 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4208 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4209 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4210 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4211 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4212 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4213 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4214 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4215 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4216
4217 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4218 if (rcStrict != VINF_SUCCESS)
4219 {
4220 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4221 VBOXSTRICTRC_VAL(rcStrict)));
4222 return rcStrict;
4223 }
4224 }
4225 else
4226 {
4227 /*
4228 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4229 */
4230 void *pvCurTSS16;
4231 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4232 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4233 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4234 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4235 if (rcStrict != VINF_SUCCESS)
4236 {
4237 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4238 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4239 return rcStrict;
4240 }
4241
4242 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4243 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4244 pCurTSS16->ip = uNextEip;
4245 pCurTSS16->flags = u32EFlags;
4246 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4247 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4248 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4249 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4250 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4251 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4252 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4253 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4254 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4255 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4256 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4257 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4258
4259 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4260 if (rcStrict != VINF_SUCCESS)
4261 {
4262 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4263 VBOXSTRICTRC_VAL(rcStrict)));
4264 return rcStrict;
4265 }
4266 }
4267
4268 /*
4269 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4270 */
4271 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4272 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4273 {
4274 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4275 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4276 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4277 }
4278
4279 /*
4280 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4281 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4282 */
4283 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4284 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4285 bool fNewDebugTrap;
4286 if (fIsNewTSS386)
4287 {
4288 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4289 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4290 uNewEip = pNewTSS32->eip;
4291 uNewEflags = pNewTSS32->eflags;
4292 uNewEax = pNewTSS32->eax;
4293 uNewEcx = pNewTSS32->ecx;
4294 uNewEdx = pNewTSS32->edx;
4295 uNewEbx = pNewTSS32->ebx;
4296 uNewEsp = pNewTSS32->esp;
4297 uNewEbp = pNewTSS32->ebp;
4298 uNewEsi = pNewTSS32->esi;
4299 uNewEdi = pNewTSS32->edi;
4300 uNewES = pNewTSS32->es;
4301 uNewCS = pNewTSS32->cs;
4302 uNewSS = pNewTSS32->ss;
4303 uNewDS = pNewTSS32->ds;
4304 uNewFS = pNewTSS32->fs;
4305 uNewGS = pNewTSS32->gs;
4306 uNewLdt = pNewTSS32->selLdt;
4307 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4308 }
4309 else
4310 {
4311 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4312 uNewCr3 = 0;
4313 uNewEip = pNewTSS16->ip;
4314 uNewEflags = pNewTSS16->flags;
4315 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4316 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4317 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4318 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4319 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4320 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4321 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4322 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4323 uNewES = pNewTSS16->es;
4324 uNewCS = pNewTSS16->cs;
4325 uNewSS = pNewTSS16->ss;
4326 uNewDS = pNewTSS16->ds;
4327 uNewFS = 0;
4328 uNewGS = 0;
4329 uNewLdt = pNewTSS16->selLdt;
4330 fNewDebugTrap = false;
4331 }
4332
4333 if (GCPtrNewTSS == GCPtrCurTSS)
4334 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4335 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4336
4337 /*
4338 * We're done accessing the new TSS.
4339 */
4340 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4341 if (rcStrict != VINF_SUCCESS)
4342 {
4343 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4344 return rcStrict;
4345 }
4346
4347 /*
4348 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4349 */
4350 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4351 {
4352 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4353 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4354 if (rcStrict != VINF_SUCCESS)
4355 {
4356 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4357 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4358 return rcStrict;
4359 }
4360
4361 /* Check that the descriptor indicates the new TSS is available (not busy). */
4362 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4363 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4364 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4365
4366 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4367 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4368 if (rcStrict != VINF_SUCCESS)
4369 {
4370 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4371 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4372 return rcStrict;
4373 }
4374 }
4375
4376 /*
4377 * From this point on, we're technically in the new task. Exceptions raised from here on are
4378 * deferred until the task switch completes, and are delivered before executing any instructions in the new task.
4379 */
4380 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4381 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4382 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4383 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4384 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4385 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4386 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4387
4388 /* Set the busy bit in TR. */
4389 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4390 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4391 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4392 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4393 {
4394 uNewEflags |= X86_EFL_NT;
4395 }
4396
4397 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4398 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4399 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4400
4401 pVCpu->cpum.GstCtx.eip = uNewEip;
4402 pVCpu->cpum.GstCtx.eax = uNewEax;
4403 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4404 pVCpu->cpum.GstCtx.edx = uNewEdx;
4405 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4406 pVCpu->cpum.GstCtx.esp = uNewEsp;
4407 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4408 pVCpu->cpum.GstCtx.esi = uNewEsi;
4409 pVCpu->cpum.GstCtx.edi = uNewEdi;
4410
4411 uNewEflags &= X86_EFL_LIVE_MASK;
4412 uNewEflags |= X86_EFL_RA1_MASK;
4413 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4414
4415 /*
4416 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4417 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4418 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4419 */
4420 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4421 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4424 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4427 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4430 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4433 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4436 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4437 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4438
4439 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4440 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4441 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4442 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4443
4444 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4445 {
4446 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4447 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4448 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4449 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4453 }
4454
4455 /*
4456 * Switch CR3 for the new task.
4457 */
4458 if ( fIsNewTSS386
4459 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4460 {
4461 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4462 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4463 AssertRCSuccessReturn(rc, rc);
4464
4465 /* Inform PGM. */
4466 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4467 AssertRCReturn(rc, rc);
4468 /* ignore informational status codes */
4469
4470 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4471 }
4472
4473 /*
4474 * Switch LDTR for the new task.
4475 */
4476 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4477 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4478 else
4479 {
4480 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4481
4482 IEMSELDESC DescNewLdt;
4483 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4484 if (rcStrict != VINF_SUCCESS)
4485 {
4486 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4487 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4488 return rcStrict;
4489 }
4490 if ( !DescNewLdt.Legacy.Gen.u1Present
4491 || DescNewLdt.Legacy.Gen.u1DescType
4492 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4493 {
4494 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4495 uNewLdt, DescNewLdt.Legacy.u));
4496 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4500 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4501 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4502 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4503 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4504 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4505 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4506 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4507 }
4508
4509 IEMSELDESC DescSS;
4510 if (IEM_IS_V86_MODE(pVCpu))
4511 {
4512 pVCpu->iem.s.uCpl = 3;
4513 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4514 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4515 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4519
4520 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4521 DescSS.Legacy.u = 0;
4522 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4523 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4524 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4525 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4526 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4527 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4528 DescSS.Legacy.Gen.u2Dpl = 3;
4529 }
4530 else
4531 {
4532 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4533
4534 /*
4535 * Load the stack segment for the new task.
4536 */
4537 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4538 {
4539 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4540 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4541 }
4542
4543 /* Fetch the descriptor. */
4544 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4545 if (rcStrict != VINF_SUCCESS)
4546 {
4547 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4548 VBOXSTRICTRC_VAL(rcStrict)));
4549 return rcStrict;
4550 }
4551
4552 /* SS must be a data segment and writable. */
4553 if ( !DescSS.Legacy.Gen.u1DescType
4554 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4555 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4556 {
4557 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4558 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4559 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4563 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4564 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4565 {
4566 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4567 uNewCpl));
4568 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4569 }
4570
4571 /* Is it there? */
4572 if (!DescSS.Legacy.Gen.u1Present)
4573 {
4574 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4575 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4576 }
4577
4578 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4579 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4580
4581 /* Set the accessed bit before committing the result into SS. */
4582 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4583 {
4584 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4585 if (rcStrict != VINF_SUCCESS)
4586 return rcStrict;
4587 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4588 }
4589
4590 /* Commit SS. */
4591 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4592 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4593 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4594 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4595 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4596 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4597 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4598
4599 /* CPL has changed, update IEM before loading rest of segments. */
4600 pVCpu->iem.s.uCpl = uNewCpl;
4601
4602 /*
4603 * Load the data segments for the new task.
4604 */
4605 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617
4618 /*
4619 * Load the code segment for the new task.
4620 */
4621 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4622 {
4623 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4624 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4625 }
4626
4627 /* Fetch the descriptor. */
4628 IEMSELDESC DescCS;
4629 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4630 if (rcStrict != VINF_SUCCESS)
4631 {
4632 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4633 return rcStrict;
4634 }
4635
4636 /* CS must be a code segment. */
4637 if ( !DescCS.Legacy.Gen.u1DescType
4638 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4639 {
4640 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4641 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4642 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4643 }
4644
4645 /* For conforming CS, DPL must be less than or equal to the RPL. */
4646 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4647 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4648 {
4649 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4650 DescCS.Legacy.Gen.u2Dpl));
4651 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4652 }
4653
4654 /* For non-conforming CS, DPL must match RPL. */
4655 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4656 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4657 {
4658 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4659 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4660 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4661 }
4662
4663 /* Is it there? */
4664 if (!DescCS.Legacy.Gen.u1Present)
4665 {
4666 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4667 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4668 }
4669
4670 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4671 u64Base = X86DESC_BASE(&DescCS.Legacy);
4672
4673 /* Set the accessed bit before committing the result into CS. */
4674 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4675 {
4676 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4677 if (rcStrict != VINF_SUCCESS)
4678 return rcStrict;
4679 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4680 }
4681
4682 /* Commit CS. */
4683 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4684 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4685 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4686 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4687 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4688 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4689 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4690 }
4691
4692 /** @todo Debug trap. */
4693 if (fIsNewTSS386 && fNewDebugTrap)
4694 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4695
4696 /*
4697 * Construct the error code masks based on what caused this task switch.
4698 * See Intel Instruction reference for INT.
4699 */
4700 uint16_t uExt;
4701 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4702 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4703 {
4704 uExt = 1;
4705 }
4706 else
4707 uExt = 0;
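    /* For reference: selector-format error codes are laid out as bits 15:3 = selector
       index, bit 2 = TI, bit 1 = IDT and bit 0 = EXT (Intel spec. 6.13 "Error Code");
       e.g. a #TS for selector 0x28 raised while delivering an external event carries
       the error code 0x29. */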
4708
4709 /*
4710 * Push any error code on to the new stack.
4711 */
4712 if (fFlags & IEM_XCPT_FLAGS_ERR)
4713 {
4714 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4715 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4716 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4717
4718 /* Check that there is sufficient space on the stack. */
4719 /** @todo Factor out segment limit checking for normal/expand down segments
4720 * into a separate function. */
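    /* Note: for a normal segment valid offsets are 0..limit, while for an expand-down
       segment they are limit+1..0xffff (or 0xffffffff when D=1), which is why the
       bounds checks in the two branches below are inverted. */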
4721 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4722 {
4723 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4724 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4725 {
4726 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4727 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4728 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4729 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4730 }
4731 }
4732 else
4733 {
4734 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4735 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4736 {
4737 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4738 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4739 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4740 }
4741 }
4742
4743
4744 if (fIsNewTSS386)
4745 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4746 else
4747 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4748 if (rcStrict != VINF_SUCCESS)
4749 {
4750 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4751 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4752 return rcStrict;
4753 }
4754 }
4755
4756 /* Check the new EIP against the new CS limit. */
4757 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4758 {
4759 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4760 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4761 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4762 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4763 }
4764
4765 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4766 pVCpu->cpum.GstCtx.ss.Sel));
4767 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4768}
4769
4770
4771/**
4772 * Implements exceptions and interrupts for protected mode.
4773 *
4774 * @returns VBox strict status code.
4775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4776 * @param cbInstr The number of bytes to offset rIP by in the return
4777 * address.
4778 * @param u8Vector The interrupt / exception vector number.
4779 * @param fFlags The flags.
4780 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4781 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4782 */
4783IEM_STATIC VBOXSTRICTRC
4784iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4785 uint8_t cbInstr,
4786 uint8_t u8Vector,
4787 uint32_t fFlags,
4788 uint16_t uErr,
4789 uint64_t uCr2)
4790{
4791 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4792
4793 /*
4794 * Read the IDT entry.
4795 */
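    /* Protected-mode IDT entries are 8 bytes each, so the whole descriptor must lie
       within the IDT limit. */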
4796 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4797 {
4798 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4799 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4800 }
4801 X86DESC Idte;
4802 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4803 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4804 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4805 {
4806 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4807 return rcStrict;
4808 }
4809 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4810 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4811 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4812
4813 /*
4814 * Check the descriptor type, DPL and such.
4815 * ASSUMES this is done in the same order as described for call-gate calls.
4816 */
4817 if (Idte.Gate.u1DescType)
4818 {
4819 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4820 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4821 }
4822 bool fTaskGate = false;
4823 uint8_t f32BitGate = true;
4824 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
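    /* Interrupt gates additionally clear IF on entry, trap gates do not; that is the
       only difference that matters here, hence X86_EFL_IF being added for the INT gate
       cases only. */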
4825 switch (Idte.Gate.u4Type)
4826 {
4827 case X86_SEL_TYPE_SYS_UNDEFINED:
4828 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4829 case X86_SEL_TYPE_SYS_LDT:
4830 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4831 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4832 case X86_SEL_TYPE_SYS_UNDEFINED2:
4833 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4834 case X86_SEL_TYPE_SYS_UNDEFINED3:
4835 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4836 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4837 case X86_SEL_TYPE_SYS_UNDEFINED4:
4838 {
4839 /** @todo check what actually happens when the type is wrong...
4840 * esp. call gates. */
4841 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4842 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4843 }
4844
4845 case X86_SEL_TYPE_SYS_286_INT_GATE:
4846 f32BitGate = false;
4847 RT_FALL_THRU();
4848 case X86_SEL_TYPE_SYS_386_INT_GATE:
4849 fEflToClear |= X86_EFL_IF;
4850 break;
4851
4852 case X86_SEL_TYPE_SYS_TASK_GATE:
4853 fTaskGate = true;
4854#ifndef IEM_IMPLEMENTS_TASKSWITCH
4855 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4856#endif
4857 break;
4858
4859 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4860 f32BitGate = false;
     RT_FALL_THRU();
4861 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4862 break;
4863
4864 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4865 }
4866
4867 /* Check DPL against CPL if applicable. */
4868 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4869 {
4870 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4873 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4874 }
4875 }
4876
4877 /* Is it there? */
4878 if (!Idte.Gate.u1Present)
4879 {
4880 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4881 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4882 }
4883
4884 /* Is it a task-gate? */
4885 if (fTaskGate)
4886 {
4887 /*
4888 * Construct the error code masks based on what caused this task switch.
4889 * See Intel Instruction reference for INT.
4890 */
4891 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4892 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4893 RTSEL SelTSS = Idte.Gate.u16Sel;
4894
4895 /*
4896 * Fetch the TSS descriptor in the GDT.
4897 */
4898 IEMSELDESC DescTSS;
4899 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4900 if (rcStrict != VINF_SUCCESS)
4901 {
4902 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4903 VBOXSTRICTRC_VAL(rcStrict)));
4904 return rcStrict;
4905 }
4906
4907 /* The TSS descriptor must be a system segment and be available (not busy). */
4908 if ( DescTSS.Legacy.Gen.u1DescType
4909 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4910 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4911 {
4912 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4913 u8Vector, SelTSS, DescTSS.Legacy.au64));
4914 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4915 }
4916
4917 /* The TSS must be present. */
4918 if (!DescTSS.Legacy.Gen.u1Present)
4919 {
4920 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4921 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4922 }
4923
4924 /* Do the actual task switch. */
4925 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4926 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4927 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4928 }
4929
4930 /* A null CS is bad. */
4931 RTSEL NewCS = Idte.Gate.u16Sel;
4932 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4933 {
4934 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4935 return iemRaiseGeneralProtectionFault0(pVCpu);
4936 }
4937
4938 /* Fetch the descriptor for the new CS. */
4939 IEMSELDESC DescCS;
4940 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4941 if (rcStrict != VINF_SUCCESS)
4942 {
4943 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4944 return rcStrict;
4945 }
4946
4947 /* Must be a code segment. */
4948 if (!DescCS.Legacy.Gen.u1DescType)
4949 {
4950 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4951 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4952 }
4953 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4954 {
4955 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4956 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4957 }
4958
4959 /* Don't allow lowering the privilege level. */
4960 /** @todo Does the lowering of privileges apply to software interrupts
4961 * only? This has bearings on the more-privileged or
4962 * same-privilege stack behavior further down. A testcase would
4963 * be nice. */
4964 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4965 {
4966 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4967 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4968 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4969 }
4970
4971 /* Make sure the selector is present. */
4972 if (!DescCS.Legacy.Gen.u1Present)
4973 {
4974 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4975 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4976 }
4977
4978 /* Check the new EIP against the new CS limit. */
4979 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4980 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4981 ? Idte.Gate.u16OffsetLow
4982 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4983 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4984 if (uNewEip > cbLimitCS)
4985 {
4986 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4987 u8Vector, uNewEip, cbLimitCS, NewCS));
4988 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4989 }
4990 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4991
4992 /* Calc the flag image to push. */
4993 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4994 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4995 fEfl &= ~X86_EFL_RF;
4996 else
4997 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4998
4999 /* From V8086 mode only go to CPL 0. */
5000 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5001 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5002 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5003 {
5004 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5005 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5006 }
5007
5008 /*
5009 * If the privilege level changes, we need to get a new stack from the TSS.
5010 * This in turns means validating the new SS and ESP...
5011 */
5012 if (uNewCpl != pVCpu->iem.s.uCpl)
5013 {
5014 RTSEL NewSS;
5015 uint32_t uNewEsp;
5016 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5017 if (rcStrict != VINF_SUCCESS)
5018 return rcStrict;
5019
5020 IEMSELDESC DescSS;
5021 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5022 if (rcStrict != VINF_SUCCESS)
5023 return rcStrict;
5024 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5025 if (!DescSS.Legacy.Gen.u1DefBig)
5026 {
5027 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5028 uNewEsp = (uint16_t)uNewEsp;
5029 }
5030
5031 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5032
5033 /* Check that there is sufficient space for the stack frame. */
5034 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5035 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5036 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5037 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
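    /* The frame pushed below is [error code,] EIP, CS, EFLAGS, ESP and SS; when coming
       from V8086 mode it additionally holds ES, DS, FS and GS. Entries are words for a
       16-bit gate and dwords for a 32-bit one, hence the shift by f32BitGate. */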
5038
5039 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5040 {
5041 if ( uNewEsp - 1 > cbLimitSS
5042 || uNewEsp < cbStackFrame)
5043 {
5044 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5045 u8Vector, NewSS, uNewEsp, cbStackFrame));
5046 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5047 }
5048 }
5049 else
5050 {
5051 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5052 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5053 {
5054 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5055 u8Vector, NewSS, uNewEsp, cbStackFrame));
5056 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5057 }
5058 }
5059
5060 /*
5061 * Start making changes.
5062 */
5063
5064 /* Set the new CPL so that stack accesses use it. */
5065 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5066 pVCpu->iem.s.uCpl = uNewCpl;
5067
5068 /* Create the stack frame. */
5069 RTPTRUNION uStackFrame;
5070 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5071 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 void * const pvStackFrame = uStackFrame.pv;
5075 if (f32BitGate)
5076 {
5077 if (fFlags & IEM_XCPT_FLAGS_ERR)
5078 *uStackFrame.pu32++ = uErr;
5079 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5080 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5081 uStackFrame.pu32[2] = fEfl;
5082 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5083 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5084 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5085 if (fEfl & X86_EFL_VM)
5086 {
5087 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5088 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5089 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5090 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5091 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5092 }
5093 }
5094 else
5095 {
5096 if (fFlags & IEM_XCPT_FLAGS_ERR)
5097 *uStackFrame.pu16++ = uErr;
5098 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5099 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5100 uStackFrame.pu16[2] = fEfl;
5101 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5102 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5103 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5104 if (fEfl & X86_EFL_VM)
5105 {
5106 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5107 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5108 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5109 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5110 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5111 }
5112 }
5113 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116
5117 /* Mark the selectors 'accessed' (hope this is the correct time). */
5118 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5119 * after pushing the stack frame? (Write protect the gdt + stack to
5120 * find out.) */
5121 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5122 {
5123 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5124 if (rcStrict != VINF_SUCCESS)
5125 return rcStrict;
5126 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5127 }
5128
5129 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5130 {
5131 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5132 if (rcStrict != VINF_SUCCESS)
5133 return rcStrict;
5134 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5135 }
5136
5137 /*
5138 * Start committing the register changes (joins with the DPL=CPL branch).
5139 */
5140 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5141 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5142 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5143 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5144 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5145 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5146 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5147 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5148 * SP is loaded).
5149 * Need to check the other combinations too:
5150 * - 16-bit TSS, 32-bit handler
5151 * - 32-bit TSS, 16-bit handler */
5152 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5153 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5154 else
5155 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5156
5157 if (fEfl & X86_EFL_VM)
5158 {
5159 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5160 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5161 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5163 }
5164 }
5165 /*
5166 * Same privilege, no stack change and smaller stack frame.
5167 */
5168 else
5169 {
5170 uint64_t uNewRsp;
5171 RTPTRUNION uStackFrame;
5172 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
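    /* Same-privilege frame: [error code,] EIP, CS and EFLAGS; again words for a 16-bit
       gate and dwords for a 32-bit one. */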
5173 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5174 if (rcStrict != VINF_SUCCESS)
5175 return rcStrict;
5176 void * const pvStackFrame = uStackFrame.pv;
5177
5178 if (f32BitGate)
5179 {
5180 if (fFlags & IEM_XCPT_FLAGS_ERR)
5181 *uStackFrame.pu32++ = uErr;
5182 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5183 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5184 uStackFrame.pu32[2] = fEfl;
5185 }
5186 else
5187 {
5188 if (fFlags & IEM_XCPT_FLAGS_ERR)
5189 *uStackFrame.pu16++ = uErr;
5190 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5191 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5192 uStackFrame.pu16[2] = fEfl;
5193 }
5194 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5195 if (rcStrict != VINF_SUCCESS)
5196 return rcStrict;
5197
5198 /* Mark the CS selector as 'accessed'. */
5199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5200 {
5201 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5202 if (rcStrict != VINF_SUCCESS)
5203 return rcStrict;
5204 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5205 }
5206
5207 /*
5208 * Start committing the register changes (joins with the other branch).
5209 */
5210 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5211 }
5212
5213 /* ... register committing continues. */
5214 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5215 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5216 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5217 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5218 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5219 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5220
5221 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5222 fEfl &= ~fEflToClear;
5223 IEMMISC_SET_EFL(pVCpu, fEfl);
5224
5225 if (fFlags & IEM_XCPT_FLAGS_CR2)
5226 pVCpu->cpum.GstCtx.cr2 = uCr2;
5227
5228 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5229 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5230
5231 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5232}
5233
5234
5235/**
5236 * Implements exceptions and interrupts for long mode.
5237 *
5238 * @returns VBox strict status code.
5239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5240 * @param cbInstr The number of bytes to offset rIP by in the return
5241 * address.
5242 * @param u8Vector The interrupt / exception vector number.
5243 * @param fFlags The flags.
5244 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5245 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5246 */
5247IEM_STATIC VBOXSTRICTRC
5248iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5249 uint8_t cbInstr,
5250 uint8_t u8Vector,
5251 uint32_t fFlags,
5252 uint16_t uErr,
5253 uint64_t uCr2)
5254{
5255 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5256
5257 /*
5258 * Read the IDT entry.
5259 */
5260 uint16_t offIdt = (uint16_t)u8Vector << 4;
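    /* Long-mode IDT entries are 16 bytes each (hence the shift by 4); the descriptor is
       fetched as two separate qwords below. */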
5261 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5262 {
5263 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5264 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5265 }
5266 X86DESC64 Idte;
5267 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5268 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5269 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5270 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5271 {
5272 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5273 return rcStrict;
5274 }
5275 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5276 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5277 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5278
5279 /*
5280 * Check the descriptor type, DPL and such.
5281 * ASSUMES this is done in the same order as described for call-gate calls.
5282 */
5283 if (Idte.Gate.u1DescType)
5284 {
5285 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5286 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5287 }
5288 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5289 switch (Idte.Gate.u4Type)
5290 {
5291 case AMD64_SEL_TYPE_SYS_INT_GATE:
5292 fEflToClear |= X86_EFL_IF;
5293 break;
5294 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5295 break;
5296
5297 default:
5298 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5299 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5300 }
5301
5302 /* Check DPL against CPL if applicable. */
5303 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5304 {
5305 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5306 {
5307 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5308 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5309 }
5310 }
5311
5312 /* Is it there? */
5313 if (!Idte.Gate.u1Present)
5314 {
5315 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5316 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5317 }
5318
5319 /* A null CS is bad. */
5320 RTSEL NewCS = Idte.Gate.u16Sel;
5321 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5322 {
5323 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5324 return iemRaiseGeneralProtectionFault0(pVCpu);
5325 }
5326
5327 /* Fetch the descriptor for the new CS. */
5328 IEMSELDESC DescCS;
5329 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5330 if (rcStrict != VINF_SUCCESS)
5331 {
5332 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5333 return rcStrict;
5334 }
5335
5336 /* Must be a 64-bit code segment. */
5337 if (!DescCS.Long.Gen.u1DescType)
5338 {
5339 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5340 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5341 }
5342 if ( !DescCS.Long.Gen.u1Long
5343 || DescCS.Long.Gen.u1DefBig
5344 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5345 {
5346 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5347 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5348 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5349 }
5350
5351 /* Don't allow lowering the privilege level. For non-conforming CS
5352 selectors, the CS.DPL sets the privilege level the trap/interrupt
5353 handler runs at. For conforming CS selectors, the CPL remains
5354 unchanged, but the CS.DPL must be <= CPL. */
5355 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5356 * when CPU in Ring-0. Result \#GP? */
5357 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5358 {
5359 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5360 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5361 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5362 }
5363
5364
5365 /* Make sure the selector is present. */
5366 if (!DescCS.Legacy.Gen.u1Present)
5367 {
5368 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5369 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5370 }
5371
5372 /* Check that the new RIP is canonical. */
5373 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5374 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5375 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5376 if (!IEM_IS_CANONICAL(uNewRip))
5377 {
5378 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5379 return iemRaiseGeneralProtectionFault0(pVCpu);
5380 }
5381
5382 /*
5383 * If the privilege level changes or if the IST isn't zero, we need to get
5384 * a new stack from the TSS.
5385 */
5386 uint64_t uNewRsp;
5387 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5388 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5389 if ( uNewCpl != pVCpu->iem.s.uCpl
5390 || Idte.Gate.u3IST != 0)
5391 {
5392 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5393 if (rcStrict != VINF_SUCCESS)
5394 return rcStrict;
5395 }
5396 else
5397 uNewRsp = pVCpu->cpum.GstCtx.rsp;
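    /* The CPU aligns the stack pointer on a 16-byte boundary before pushing the 64-bit
       interrupt frame. */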
5398 uNewRsp &= ~(uint64_t)0xf;
5399
5400 /*
5401 * Calc the flag image to push.
5402 */
5403 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5404 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5405 fEfl &= ~X86_EFL_RF;
5406 else
5407 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5408
5409 /*
5410 * Start making changes.
5411 */
5412 /* Set the new CPL so that stack accesses use it. */
5413 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5414 pVCpu->iem.s.uCpl = uNewCpl;
5415
5416 /* Create the stack frame. */
5417 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
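    /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (five qwords), plus the
       error code, if any, at the top of the stack. */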
5418 RTPTRUNION uStackFrame;
5419 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5420 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5421 if (rcStrict != VINF_SUCCESS)
5422 return rcStrict;
5423 void * const pvStackFrame = uStackFrame.pv;
5424
5425 if (fFlags & IEM_XCPT_FLAGS_ERR)
5426 *uStackFrame.pu64++ = uErr;
5427 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5428 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5429 uStackFrame.pu64[2] = fEfl;
5430 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5431 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5432 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5433 if (rcStrict != VINF_SUCCESS)
5434 return rcStrict;
5435
5436 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5437 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5438 * after pushing the stack frame? (Write protect the gdt + stack to
5439 * find out.) */
5440 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5441 {
5442 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5443 if (rcStrict != VINF_SUCCESS)
5444 return rcStrict;
5445 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5446 }
5447
5448 /*
5449 * Start committing the register changes.
5450 */
5451 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5452 * hidden registers when interrupting 32-bit or 16-bit code! */
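    /* In 64-bit mode a CPL change loads SS with a NULL selector whose RPL equals the new
       CPL; the hidden base/limit parts are not used for the new stack. */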
5453 if (uNewCpl != uOldCpl)
5454 {
5455 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5456 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5457 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5458 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5459 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5460 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5461 }
5462 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5463 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5464 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5465 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5466 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5467 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5468 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5469 pVCpu->cpum.GstCtx.rip = uNewRip;
5470
5471 fEfl &= ~fEflToClear;
5472 IEMMISC_SET_EFL(pVCpu, fEfl);
5473
5474 if (fFlags & IEM_XCPT_FLAGS_CR2)
5475 pVCpu->cpum.GstCtx.cr2 = uCr2;
5476
5477 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5478 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5479
5480 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5481}
5482
5483
5484/**
5485 * Implements exceptions and interrupts.
5486 *
5487 * All exceptions and interrupts go through this function!
5488 *
5489 * @returns VBox strict status code.
5490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5491 * @param cbInstr The number of bytes to offset rIP by in the return
5492 * address.
5493 * @param u8Vector The interrupt / exception vector number.
5494 * @param fFlags The flags.
5495 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5496 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5497 */
5498DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5499iemRaiseXcptOrInt(PVMCPU pVCpu,
5500 uint8_t cbInstr,
5501 uint8_t u8Vector,
5502 uint32_t fFlags,
5503 uint16_t uErr,
5504 uint64_t uCr2)
5505{
5506 /*
5507 * Get all the state that we might need here.
5508 */
5509 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5510 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5511
5512#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5513 /*
5514 * Flush prefetch buffer
5515 */
5516 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5517#endif
5518
5519 /*
5520 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5521 */
5522 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5523 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5524 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5525 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5526 {
5527 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5528 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5529 u8Vector = X86_XCPT_GP;
5530 uErr = 0;
5531 }
5532#ifdef DBGFTRACE_ENABLED
5533 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5534 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5535 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5536#endif
5537
5538#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5539 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5540 {
5541 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5542 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5543 return rcStrict0;
5544 }
5545#endif
5546
5547#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5548 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5549 {
5550 /*
5551 * If the event is being injected as part of VMRUN, it isn't subject to event
5552 * intercepts in the nested-guest. However, secondary exceptions that occur
5553 * during injection of any event -are- subject to exception intercepts.
5554 *
5555 * See AMD spec. 15.20 "Event Injection".
5556 */
5557 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5558 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5559 else
5560 {
5561 /*
5562 * Check and handle if the event being raised is intercepted.
5563 */
5564 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5565 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5566 return rcStrict0;
5567 }
5568 }
5569#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5570
5571 /*
5572 * Do recursion accounting.
5573 */
5574 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5575 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5576 if (pVCpu->iem.s.cXcptRecursions == 0)
5577 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5578 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5579 else
5580 {
5581 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5582 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5583 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5584
5585 if (pVCpu->iem.s.cXcptRecursions >= 4)
5586 {
5587#ifdef DEBUG_bird
5588 AssertFailed();
5589#endif
5590 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5591 }
5592
5593 /*
5594 * Evaluate the sequence of recurring events.
5595 */
5596 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5597 NULL /* pXcptRaiseInfo */);
5598 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5599 { /* likely */ }
5600 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5601 {
5602 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5603 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5604 u8Vector = X86_XCPT_DF;
5605 uErr = 0;
5606 /** @todo NSTVMX: Do we need to do something here for VMX? */
5607 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5608 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5609 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5610 }
5611 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5612 {
5613 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5614 return iemInitiateCpuShutdown(pVCpu);
5615 }
5616 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5617 {
5618 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5619 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5620 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5621 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5622 return VERR_EM_GUEST_CPU_HANG;
5623 }
5624 else
5625 {
5626 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5627 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5628 return VERR_IEM_IPE_9;
5629 }
5630
5631 /*
5632 * The 'EXT' bit is set when an exception occurs during delivery of an external
5633 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5634 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5635 * software interrupts and the INTO and INT3 instructions, the EXT bit is not set[3].
5636 *
5637 * [1] - Intel spec. 6.13 "Error Code"
5638 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5639 * [3] - Intel Instruction reference for INT n.
5640 */
5641 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5642 && (fFlags & IEM_XCPT_FLAGS_ERR)
5643 && u8Vector != X86_XCPT_PF
5644 && u8Vector != X86_XCPT_DF)
5645 {
5646 uErr |= X86_TRAP_ERR_EXTERNAL;
5647 }
5648 }
5649
5650 pVCpu->iem.s.cXcptRecursions++;
5651 pVCpu->iem.s.uCurXcpt = u8Vector;
5652 pVCpu->iem.s.fCurXcpt = fFlags;
5653 pVCpu->iem.s.uCurXcptErr = uErr;
5654 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5655
5656 /*
5657 * Extensive logging.
5658 */
5659#if defined(LOG_ENABLED) && defined(IN_RING3)
5660 if (LogIs3Enabled())
5661 {
5662 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5663 PVM pVM = pVCpu->CTX_SUFF(pVM);
5664 char szRegs[4096];
5665 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5666 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5667 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5668 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5669 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5670 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5671 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5672 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5673 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5674 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5675 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5676 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5677 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5678 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5679 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5680 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5681 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5682 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5683 " efer=%016VR{efer}\n"
5684 " pat=%016VR{pat}\n"
5685 " sf_mask=%016VR{sf_mask}\n"
5686 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5687 " lstar=%016VR{lstar}\n"
5688 " star=%016VR{star} cstar=%016VR{cstar}\n"
5689 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5690 );
5691
5692 char szInstr[256];
5693 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5694 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5695 szInstr, sizeof(szInstr), NULL);
5696 Log3(("%s%s\n", szRegs, szInstr));
5697 }
5698#endif /* LOG_ENABLED */
5699
5700 /*
5701 * Call the mode specific worker function.
5702 */
5703 VBOXSTRICTRC rcStrict;
5704 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5705 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5706 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5707 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5708 else
5709 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5710
5711 /* Flush the prefetch buffer. */
5712#ifdef IEM_WITH_CODE_TLB
5713 pVCpu->iem.s.pbInstrBuf = NULL;
5714#else
5715 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5716#endif
5717
5718 /*
5719 * Unwind.
5720 */
5721 pVCpu->iem.s.cXcptRecursions--;
5722 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5723 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5724 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5725 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5726 pVCpu->iem.s.cXcptRecursions + 1));
5727 return rcStrict;
5728}
5729
5730#ifdef IEM_WITH_SETJMP
5731/**
5732 * See iemRaiseXcptOrInt. Will not return.
5733 */
5734IEM_STATIC DECL_NO_RETURN(void)
5735iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5736 uint8_t cbInstr,
5737 uint8_t u8Vector,
5738 uint32_t fFlags,
5739 uint16_t uErr,
5740 uint64_t uCr2)
5741{
5742 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5743 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5744}
5745#endif
5746
5747
5748/** \#DE - 00. */
5749DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5750{
5751 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5752}
5753
5754
5755/** \#DB - 01.
5756 * @note This automatically clears DR7.GD. */
5757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5758{
5759 /** @todo set/clear RF. */
5760 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5762}
5763
5764
5765/** \#BR - 05. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5767{
5768 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5769}
5770
5771
5772/** \#UD - 06. */
5773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5774{
5775 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5776}
5777
5778
5779/** \#NM - 07. */
5780DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5781{
5782 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5783}
5784
5785
5786/** \#TS(err) - 0a. */
5787DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5788{
5789 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5790}
5791
5792
5793/** \#TS(tr) - 0a. */
5794DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5795{
5796 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5797 pVCpu->cpum.GstCtx.tr.Sel, 0);
5798}
5799
5800
5801/** \#TS(0) - 0a. */
5802DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5803{
5804 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5805 0, 0);
5806}
5807
5808
5809/** \#TS(err) - 0a. */
5810DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5811{
5812 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5813 uSel & X86_SEL_MASK_OFF_RPL, 0);
5814}
5815
5816
5817/** \#NP(err) - 0b. */
5818DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5819{
5820 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5821}
5822
5823
5824/** \#NP(sel) - 0b. */
5825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5826{
5827 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5828 uSel & ~X86_SEL_RPL, 0);
5829}
5830
5831
5832/** \#SS(seg) - 0c. */
5833DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5834{
5835 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5836 uSel & ~X86_SEL_RPL, 0);
5837}
5838
5839
5840/** \#SS(err) - 0c. */
5841DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5842{
5843 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5844}
5845
5846
5847/** \#GP(n) - 0d. */
5848DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5849{
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5851}
5852
5853
5854/** \#GP(0) - 0d. */
5855DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5856{
5857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5858}
5859
5860#ifdef IEM_WITH_SETJMP
5861/** \#GP(0) - 0d. */
5862DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5863{
5864 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5865}
5866#endif
5867
5868
5869/** \#GP(sel) - 0d. */
5870DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5871{
5872 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5873 Sel & ~X86_SEL_RPL, 0);
5874}
5875
5876
5877/** \#GP(0) - 0d. */
5878DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5879{
5880 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5881}
5882
5883
5884/** \#GP(sel) - 0d. */
5885DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5886{
5887 NOREF(iSegReg); NOREF(fAccess);
5888 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5889 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5890}
5891
5892#ifdef IEM_WITH_SETJMP
5893/** \#GP(sel) - 0d, longjmp. */
5894DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5895{
5896 NOREF(iSegReg); NOREF(fAccess);
5897 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5898 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5899}
5900#endif
5901
5902/** \#GP(sel) - 0d. */
5903DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5904{
5905 NOREF(Sel);
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5907}
5908
5909#ifdef IEM_WITH_SETJMP
5910/** \#GP(sel) - 0d, longjmp. */
5911DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5912{
5913 NOREF(Sel);
5914 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5915}
5916#endif
5917
5918
5919/** \#GP(sel) - 0d. */
5920DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5921{
5922 NOREF(iSegReg); NOREF(fAccess);
5923 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5924}
5925
5926#ifdef IEM_WITH_SETJMP
5927/** \#GP(sel) - 0d, longjmp. */
5928DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5929 uint32_t fAccess)
5930{
5931 NOREF(iSegReg); NOREF(fAccess);
5932 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5933}
5934#endif
5935
5936
5937/** \#PF(n) - 0e. */
5938DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5939{
5940 uint16_t uErr;
5941 switch (rc)
5942 {
5943 case VERR_PAGE_NOT_PRESENT:
5944 case VERR_PAGE_TABLE_NOT_PRESENT:
5945 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5946 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5947 uErr = 0;
5948 break;
5949
5950 default:
5951 AssertMsgFailed(("%Rrc\n", rc));
5952 RT_FALL_THRU();
5953 case VERR_ACCESS_DENIED:
5954 uErr = X86_TRAP_PF_P;
5955 break;
5956
5957 /** @todo reserved */
5958 }
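    /* Page-fault error code bits: P (0), W/R (1), U/S (2), RSVD (3) and I/D (4); the
       applicable ones are ORed in below. */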
5959
5960 if (pVCpu->iem.s.uCpl == 3)
5961 uErr |= X86_TRAP_PF_US;
5962
5963 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5964 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5965 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5966 uErr |= X86_TRAP_PF_ID;
5967
5968#if 0 /* This is so much non-sense, really. Why was it done like that? */
5969 /* Note! RW access callers reporting a WRITE protection fault, will clear
5970 the READ flag before calling. So, read-modify-write accesses (RW)
5971 can safely be reported as READ faults. */
5972 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5973 uErr |= X86_TRAP_PF_RW;
5974#else
5975 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5976 {
5977 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5978 uErr |= X86_TRAP_PF_RW;
5979 }
5980#endif
5981
5982 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5983 uErr, GCPtrWhere);
5984}
5985
5986#ifdef IEM_WITH_SETJMP
5987/** \#PF(n) - 0e, longjmp. */
5988IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5989{
5990 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5991}
5992#endif
5993
5994
5995/** \#MF(0) - 10. */
5996DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5997{
5998 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5999}
6000
6001
6002/** \#AC(0) - 11. */
6003DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6004{
6005 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6006}
6007
6008
6009/**
6010 * Macro for calling iemCImplRaiseDivideError().
6011 *
6012 * This enables us to add/remove arguments and force different levels of
6013 * inlining as we wish.
6014 *
6015 * @return Strict VBox status code.
6016 */
6017#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6018IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6019{
6020 NOREF(cbInstr);
6021 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6022}
6023
6024
6025/**
6026 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6027 *
6028 * This enables us to add/remove arguments and force different levels of
6029 * inlining as we wish.
6030 *
6031 * @return Strict VBox status code.
6032 */
6033#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6034IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6035{
6036 NOREF(cbInstr);
6037 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6038}
6039
6040
6041/**
6042 * Macro for calling iemCImplRaiseInvalidOpcode().
6043 *
6044 * This enables us to add/remove arguments and force different levels of
6045 * inlining as we wish.
6046 *
6047 * @return Strict VBox status code.
6048 */
6049#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6050IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6051{
6052 NOREF(cbInstr);
6053 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6054}
6055
6056
6057/** @} */
6058
6059
6060/*
6061 *
6062 * Helper routines.
6063 * Helper routines.
6064 * Helper routines.
6065 *
6066 */
6067
6068/**
6069 * Recalculates the effective operand size.
6070 *
6071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6072 */
6073IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6074{
6075 switch (pVCpu->iem.s.enmCpuMode)
6076 {
6077 case IEMMODE_16BIT:
6078 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6079 break;
6080 case IEMMODE_32BIT:
6081 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6082 break;
6083 case IEMMODE_64BIT:
6084 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6085 {
6086 case 0:
6087 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6088 break;
6089 case IEM_OP_PRF_SIZE_OP:
6090 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6091 break;
6092 case IEM_OP_PRF_SIZE_REX_W:
6093 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6094 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6095 break;
6096 }
6097 break;
6098 default:
6099 AssertFailed();
6100 }
6101}
6102
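/*
 * A condensed sketch of the 64-bit mode rules in the switch above, using a
 * hypothetical helper name: REX.W always wins, a lone 0x66 prefix selects
 * 16-bit, and with neither prefix the default operand size applies.
 */
#if 0 /* illustrative sketch only */
static IEMMODE iemExampleEffOpSize64(uint32_t fPrefixes, IEMMODE enmDefOpSize)
{
    if (fPrefixes & IEM_OP_PRF_SIZE_REX_W)
        return IEMMODE_64BIT;       /* REX.W overrides the operand size prefix. */
    if (fPrefixes & IEM_OP_PRF_SIZE_OP)
        return IEMMODE_16BIT;       /* 0x66 without REX.W. */
    return enmDefOpSize;            /* Usually 32-bit; 64-bit for certain instructions. */
}
#endif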
6103
6104/**
6105 * Sets the default operand size to 64-bit and recalculates the effective
6106 * operand size.
6107 *
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 */
6110IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6111{
6112 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6113 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6114 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6115 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6116 else
6117 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6118}
6119
6120
6121/*
6122 *
6123 * Common opcode decoders.
6124 * Common opcode decoders.
6125 * Common opcode decoders.
6126 *
6127 */
6128//#include <iprt/mem.h>
6129
6130/**
6131 * Used to add extra details about a stub case.
6132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6133 */
6134IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6135{
6136#if defined(LOG_ENABLED) && defined(IN_RING3)
6137 PVM pVM = pVCpu->CTX_SUFF(pVM);
6138 char szRegs[4096];
6139 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6140 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6141 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6142 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6143 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6144 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6145 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6146 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6147 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6148 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6149 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6150 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6151 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6152 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6153 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6154 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6155 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6156 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6157 " efer=%016VR{efer}\n"
6158 " pat=%016VR{pat}\n"
6159 " sf_mask=%016VR{sf_mask}\n"
6160 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6161 " lstar=%016VR{lstar}\n"
6162 " star=%016VR{star} cstar=%016VR{cstar}\n"
6163 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6164 );
6165
6166 char szInstr[256];
6167 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6168 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6169 szInstr, sizeof(szInstr), NULL);
6170
6171 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6172#else
6173    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6174#endif
6175}
6176
6177/**
6178 * Complains about a stub.
6179 *
6180 * Providing two versions of this macro, one for daily use and one for use when
6181 * working on IEM.
6182 */
6183#if 0
6184# define IEMOP_BITCH_ABOUT_STUB() \
6185 do { \
6186 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6187 iemOpStubMsg2(pVCpu); \
6188 RTAssertPanic(); \
6189 } while (0)
6190#else
6191# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6192#endif
6193
6194/** Stubs an opcode. */
6195#define FNIEMOP_STUB(a_Name) \
6196 FNIEMOP_DEF(a_Name) \
6197 { \
6198 RT_NOREF_PV(pVCpu); \
6199 IEMOP_BITCH_ABOUT_STUB(); \
6200 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6201 } \
6202 typedef int ignore_semicolon
6203
6204/** Stubs an opcode. */
6205#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6206 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6207 { \
6208 RT_NOREF_PV(pVCpu); \
6209 RT_NOREF_PV(a_Name0); \
6210 IEMOP_BITCH_ABOUT_STUB(); \
6211 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6212 } \
6213 typedef int ignore_semicolon
6214
6215/** Stubs an opcode which currently should raise \#UD. */
6216#define FNIEMOP_UD_STUB(a_Name) \
6217 FNIEMOP_DEF(a_Name) \
6218 { \
6219 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6220 return IEMOP_RAISE_INVALID_OPCODE(); \
6221 } \
6222 typedef int ignore_semicolon
6223
6224/** Stubs an opcode which currently should raise \#UD. */
6225#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6226 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6227 { \
6228 RT_NOREF_PV(pVCpu); \
6229 RT_NOREF_PV(a_Name0); \
6230 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6231 return IEMOP_RAISE_INVALID_OPCODE(); \
6232 } \
6233 typedef int ignore_semicolon
6234
6235
6236
6237/** @name Register Access.
6238 * @{
6239 */
6240
6241/**
6242 * Gets a reference (pointer) to the specified hidden segment register.
6243 *
6244 * @returns Hidden register reference.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iSegReg The segment register.
6247 */
6248IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6249{
6250 Assert(iSegReg < X86_SREG_COUNT);
6251 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6252 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6253
6254#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6255 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6256 { /* likely */ }
6257 else
6258 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6259#else
6260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6261#endif
6262 return pSReg;
6263}
6264
6265
6266/**
6267 * Ensures that the given hidden segment register is up to date.
6268 *
6269 * @returns Hidden register reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param pSReg The segment register.
6272 */
6273IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6274{
6275#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6276 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6277 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6278#else
6279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6280 NOREF(pVCpu);
6281#endif
6282 return pSReg;
6283}
6284
6285
6286/**
6287 * Gets a reference (pointer) to the specified segment register (the selector
6288 * value).
6289 *
6290 * @returns Pointer to the selector variable.
6291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6292 * @param iSegReg The segment register.
6293 */
6294DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6295{
6296 Assert(iSegReg < X86_SREG_COUNT);
6297 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6298 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6299}
6300
6301
6302/**
6303 * Fetches the selector value of a segment register.
6304 *
6305 * @returns The selector value.
6306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6307 * @param iSegReg The segment register.
6308 */
6309DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6310{
6311 Assert(iSegReg < X86_SREG_COUNT);
6312 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6313 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6314}
6315
6316
6317/**
6318 * Fetches the base address value of a segment register.
6319 *
6320 * @returns The base address value.
6321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6322 * @param iSegReg The segment register.
6323 */
6324DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6325{
6326 Assert(iSegReg < X86_SREG_COUNT);
6327 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6328 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6329}
6330
6331
6332/**
6333 * Gets a reference (pointer) to the specified general purpose register.
6334 *
6335 * @returns Register reference.
6336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6337 * @param iReg The general purpose register.
6338 */
6339DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6340{
6341 Assert(iReg < 16);
6342 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6343}
6344
6345
6346/**
6347 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6348 *
6349 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6350 *
6351 * @returns Register reference.
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param iReg The register.
6354 */
6355DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6356{
6357 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6358 {
6359 Assert(iReg < 16);
6360 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6361 }
6362 /* high 8-bit register. */
6363 Assert(iReg < 8);
6364 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6365}
6366
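/*
 * A worked example of the special case above, using a hypothetical helper
 * name: with no REX prefix, byte register encodings 4-7 mean AH/CH/DH/BH,
 * i.e. the high byte (bHi) of registers 0-3, while with any REX prefix the
 * same encodings mean SPL/BPL/SIL/DIL.
 */
#if 0 /* illustrative sketch only */
static void iemExampleHighByteRegs(PVMCPU pVCpu)
{
    uint8_t *pbAh  = &pVCpu->cpum.GstCtx.aGRegs[4 & 3].bHi; /* Encoding 4 without REX: AH, high byte of RAX. */
    uint8_t *pbSpl = &pVCpu->cpum.GstCtx.aGRegs[4].u8;      /* Encoding 4 with a REX prefix: SPL, low byte of RSP. */
    NOREF(pbAh); NOREF(pbSpl);
}
#endif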
6367
6368/**
6369 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6370 *
6371 * @returns Register reference.
6372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6373 * @param iReg The register.
6374 */
6375DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6376{
6377 Assert(iReg < 16);
6378 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6379}
6380
6381
6382/**
6383 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6384 *
6385 * @returns Register reference.
6386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6387 * @param iReg The register.
6388 */
6389DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6390{
6391 Assert(iReg < 16);
6392 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6393}
6394
6395
6396/**
6397 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6398 *
6399 * @returns Register reference.
6400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6401 * @param iReg The register.
6402 */
6403DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6404{
6405    Assert(iReg < 16);
6406 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6407}
6408
6409
6410/**
6411 * Gets a reference (pointer) to the specified segment register's base address.
6412 *
6413 * @returns Segment register base address reference.
6414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6415 * @param iSegReg The segment selector.
6416 */
6417DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6418{
6419 Assert(iSegReg < X86_SREG_COUNT);
6420 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6421 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6422}
6423
6424
6425/**
6426 * Fetches the value of an 8-bit general purpose register.
6427 *
6428 * @returns The register value.
6429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6430 * @param iReg The register.
6431 */
6432DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6433{
6434 return *iemGRegRefU8(pVCpu, iReg);
6435}
6436
6437
6438/**
6439 * Fetches the value of a 16-bit general purpose register.
6440 *
6441 * @returns The register value.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param iReg The register.
6444 */
6445DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6446{
6447 Assert(iReg < 16);
6448 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6449}
6450
6451
6452/**
6453 * Fetches the value of a 32-bit general purpose register.
6454 *
6455 * @returns The register value.
6456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6457 * @param iReg The register.
6458 */
6459DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6460{
6461 Assert(iReg < 16);
6462 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6463}
6464
6465
6466/**
6467 * Fetches the value of a 64-bit general purpose register.
6468 *
6469 * @returns The register value.
6470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6471 * @param iReg The register.
6472 */
6473DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6474{
6475 Assert(iReg < 16);
6476 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6477}
6478
6479
6480/**
6481 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6482 *
6483 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6484 * segment limit.
6485 *
6486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6487 * @param offNextInstr The offset of the next instruction.
6488 */
6489IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6490{
6491 switch (pVCpu->iem.s.enmEffOpSize)
6492 {
6493 case IEMMODE_16BIT:
6494 {
6495 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6496 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6497 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6498 return iemRaiseGeneralProtectionFault0(pVCpu);
6499 pVCpu->cpum.GstCtx.rip = uNewIp;
6500 break;
6501 }
6502
6503 case IEMMODE_32BIT:
6504 {
6505 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6506 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6507
6508 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6509 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6510 return iemRaiseGeneralProtectionFault0(pVCpu);
6511 pVCpu->cpum.GstCtx.rip = uNewEip;
6512 break;
6513 }
6514
6515 case IEMMODE_64BIT:
6516 {
6517 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6518
6519 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6520 if (!IEM_IS_CANONICAL(uNewRip))
6521 return iemRaiseGeneralProtectionFault0(pVCpu);
6522 pVCpu->cpum.GstCtx.rip = uNewRip;
6523 break;
6524 }
6525
6526 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6527 }
6528
6529 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6530
6531#ifndef IEM_WITH_CODE_TLB
6532 /* Flush the prefetch buffer. */
6533 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6534#endif
6535
6536 return VINF_SUCCESS;
6537}
6538
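/*
 * Worked example of the arithmetic above (hypothetical values): a 2-byte
 * short jump at IP=0x1000 in 16-bit code with an encoded offset of -4 (0xFC)
 * targets 0x1000 + 2 + (-4) = 0x0FFE, which is then checked against the CS
 * limit before being written to RIP.
 */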
6539
6540/**
6541 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6542 *
6543 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6544 * segment limit.
6545 *
6546 * @returns Strict VBox status code.
6547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6548 * @param offNextInstr The offset of the next instruction.
6549 */
6550IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6551{
6552 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6553
6554 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6555 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6556 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6557 return iemRaiseGeneralProtectionFault0(pVCpu);
6558 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6559 pVCpu->cpum.GstCtx.rip = uNewIp;
6560 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6561
6562#ifndef IEM_WITH_CODE_TLB
6563 /* Flush the prefetch buffer. */
6564 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6565#endif
6566
6567 return VINF_SUCCESS;
6568}
6569
6570
6571/**
6572 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6573 *
6574 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6575 * segment limit.
6576 *
6577 * @returns Strict VBox status code.
6578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6579 * @param offNextInstr The offset of the next instruction.
6580 */
6581IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6582{
6583 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6584
6585 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6586 {
6587 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6588
6589 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6590 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6591 return iemRaiseGeneralProtectionFault0(pVCpu);
6592 pVCpu->cpum.GstCtx.rip = uNewEip;
6593 }
6594 else
6595 {
6596 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6597
6598 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6599 if (!IEM_IS_CANONICAL(uNewRip))
6600 return iemRaiseGeneralProtectionFault0(pVCpu);
6601 pVCpu->cpum.GstCtx.rip = uNewRip;
6602 }
6603 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6604
6605#ifndef IEM_WITH_CODE_TLB
6606 /* Flush the prefetch buffer. */
6607 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6608#endif
6609
6610 return VINF_SUCCESS;
6611}
6612
6613
6614/**
6615 * Performs a near jump to the specified address.
6616 *
6617 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6618 * segment limit.
6619 *
6620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6621 * @param uNewRip The new RIP value.
6622 */
6623IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6624{
6625 switch (pVCpu->iem.s.enmEffOpSize)
6626 {
6627 case IEMMODE_16BIT:
6628 {
6629 Assert(uNewRip <= UINT16_MAX);
6630 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6631 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6632 return iemRaiseGeneralProtectionFault0(pVCpu);
6633 /** @todo Test 16-bit jump in 64-bit mode. */
6634 pVCpu->cpum.GstCtx.rip = uNewRip;
6635 break;
6636 }
6637
6638 case IEMMODE_32BIT:
6639 {
6640 Assert(uNewRip <= UINT32_MAX);
6641 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6642 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6643
6644 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6645 return iemRaiseGeneralProtectionFault0(pVCpu);
6646 pVCpu->cpum.GstCtx.rip = uNewRip;
6647 break;
6648 }
6649
6650 case IEMMODE_64BIT:
6651 {
6652 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6653
6654 if (!IEM_IS_CANONICAL(uNewRip))
6655 return iemRaiseGeneralProtectionFault0(pVCpu);
6656 pVCpu->cpum.GstCtx.rip = uNewRip;
6657 break;
6658 }
6659
6660 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6661 }
6662
6663 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6664
6665#ifndef IEM_WITH_CODE_TLB
6666 /* Flush the prefetch buffer. */
6667 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6668#endif
6669
6670 return VINF_SUCCESS;
6671}
6672
6673
6674/**
6675 * Gets the address of the top of the stack.
6676 *
6677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6678 */
6679DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6680{
6681 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6682 return pVCpu->cpum.GstCtx.rsp;
6683 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6684 return pVCpu->cpum.GstCtx.esp;
6685 return pVCpu->cpum.GstCtx.sp;
6686}
6687
6688
6689/**
6690 * Updates the RIP/EIP/IP to point to the next instruction.
6691 *
6692 * This function leaves the EFLAGS.RF flag alone.
6693 *
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param cbInstr The number of bytes to add.
6696 */
6697IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6698{
6699 switch (pVCpu->iem.s.enmCpuMode)
6700 {
6701 case IEMMODE_16BIT:
6702 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6703 pVCpu->cpum.GstCtx.eip += cbInstr;
6704 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6705 break;
6706
6707 case IEMMODE_32BIT:
6708 pVCpu->cpum.GstCtx.eip += cbInstr;
6709 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6710 break;
6711
6712 case IEMMODE_64BIT:
6713 pVCpu->cpum.GstCtx.rip += cbInstr;
6714 break;
6715 default: AssertFailed();
6716 }
6717}
6718
6719
6720#if 0
6721/**
6722 * Updates the RIP/EIP/IP to point to the next instruction.
6723 *
6724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6725 */
6726IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6727{
6728 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6729}
6730#endif
6731
6732
6733
6734/**
6735 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6736 *
6737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6738 * @param cbInstr The number of bytes to add.
6739 */
6740IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6741{
6742 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6743
6744 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6745#if ARCH_BITS >= 64
6746 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6747 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6748 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6749#else
6750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6751 pVCpu->cpum.GstCtx.rip += cbInstr;
6752 else
6753 pVCpu->cpum.GstCtx.eip += cbInstr;
6754#endif
6755}
6756
6757
6758/**
6759 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6760 *
6761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6762 */
6763IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6764{
6765 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6766}
6767
6768
6769/**
6770 * Adds to the stack pointer.
6771 *
6772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6773 * @param cbToAdd The number of bytes to add (8-bit!).
6774 */
6775DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6776{
6777 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6778 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6779 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6780 pVCpu->cpum.GstCtx.esp += cbToAdd;
6781 else
6782 pVCpu->cpum.GstCtx.sp += cbToAdd;
6783}
6784
6785
6786/**
6787 * Subtracts from the stack pointer.
6788 *
6789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6790 * @param cbToSub The number of bytes to subtract (8-bit!).
6791 */
6792DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6793{
6794 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6795 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6796 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6797 pVCpu->cpum.GstCtx.esp -= cbToSub;
6798 else
6799 pVCpu->cpum.GstCtx.sp -= cbToSub;
6800}
6801
6802
6803/**
6804 * Adds to the temporary stack pointer.
6805 *
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6808 * @param cbToAdd The number of bytes to add (16-bit).
6809 */
6810DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6811{
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 pTmpRsp->u += cbToAdd;
6814 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6815 pTmpRsp->DWords.dw0 += cbToAdd;
6816 else
6817 pTmpRsp->Words.w0 += cbToAdd;
6818}
6819
6820
6821/**
6822 * Subtracts from the temporary stack pointer.
6823 *
6824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6825 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6826 * @param cbToSub The number of bytes to subtract.
6827 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6828 * expecting that.
6829 */
6830DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6831{
6832 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6833 pTmpRsp->u -= cbToSub;
6834 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6835 pTmpRsp->DWords.dw0 -= cbToSub;
6836 else
6837 pTmpRsp->Words.w0 -= cbToSub;
6838}
6839
6840
6841/**
6842 * Calculates the effective stack address for a push of the specified size as
6843 * well as the new RSP value (upper bits may be masked).
6844 *
6845 * @returns Effective stack address for the push.
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 * @param cbItem The size of the stack item to push.
6848 * @param puNewRsp Where to return the new RSP value.
6849 */
6850DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6851{
6852 RTUINT64U uTmpRsp;
6853 RTGCPTR GCPtrTop;
6854 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6855
6856 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6857 GCPtrTop = uTmpRsp.u -= cbItem;
6858 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6859 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6860 else
6861 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6862 *puNewRsp = uTmpRsp.u;
6863 return GCPtrTop;
6864}
6865
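/*
 * Worked example (hypothetical values): pushing a 2-byte item with
 * RSP=0x00011000 and a 16-bit stack (SS.B=0) only decrements the low word,
 * so the effective address returned is 0x0FFE while *puNewRsp becomes
 * 0x00010FFE, i.e. the upper RSP bits are carried over untouched.
 */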
6866
6867/**
6868 * Gets the current stack pointer and calculates the value after a pop of the
6869 * specified size.
6870 *
6871 * @returns Current stack pointer.
6872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6873 * @param cbItem The size of the stack item to pop.
6874 * @param puNewRsp Where to return the new RSP value.
6875 */
6876DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6877{
6878 RTUINT64U uTmpRsp;
6879 RTGCPTR GCPtrTop;
6880 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6881
6882 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6883 {
6884 GCPtrTop = uTmpRsp.u;
6885 uTmpRsp.u += cbItem;
6886 }
6887 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6888 {
6889 GCPtrTop = uTmpRsp.DWords.dw0;
6890 uTmpRsp.DWords.dw0 += cbItem;
6891 }
6892 else
6893 {
6894 GCPtrTop = uTmpRsp.Words.w0;
6895 uTmpRsp.Words.w0 += cbItem;
6896 }
6897 *puNewRsp = uTmpRsp.u;
6898 return GCPtrTop;
6899}
6900
6901
6902/**
6903 * Calculates the effective stack address for a push of the specified size as
6904 * well as the new temporary RSP value (upper bits may be masked).
6905 *
6906 * @returns Effective stack address for the push.
6907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6908 * @param pTmpRsp The temporary stack pointer. This is updated.
6909 * @param cbItem The size of the stack item to push.
6910 */
6911DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6912{
6913 RTGCPTR GCPtrTop;
6914
6915 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6916 GCPtrTop = pTmpRsp->u -= cbItem;
6917 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6918 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6919 else
6920 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6921 return GCPtrTop;
6922}
6923
6924
6925/**
6926 * Gets the effective stack address for a pop of the specified size and
6927 * calculates and updates the temporary RSP.
6928 *
6929 * @returns Current stack pointer.
6930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6931 * @param pTmpRsp The temporary stack pointer. This is updated.
6932 * @param cbItem The size of the stack item to pop.
6933 */
6934DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6935{
6936 RTGCPTR GCPtrTop;
6937 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6938 {
6939 GCPtrTop = pTmpRsp->u;
6940 pTmpRsp->u += cbItem;
6941 }
6942 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6943 {
6944 GCPtrTop = pTmpRsp->DWords.dw0;
6945 pTmpRsp->DWords.dw0 += cbItem;
6946 }
6947 else
6948 {
6949 GCPtrTop = pTmpRsp->Words.w0;
6950 pTmpRsp->Words.w0 += cbItem;
6951 }
6952 return GCPtrTop;
6953}
6954
6955/** @} */
6956
6957
6958/** @name FPU access and helpers.
6959 *
6960 * @{
6961 */
6962
6963
6964/**
6965 * Hook for preparing to use the host FPU.
6966 *
6967 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6968 *
6969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6970 */
6971DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6972{
6973#ifdef IN_RING3
6974 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6975#else
6976 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6977#endif
6978 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6979}
6980
6981
6982/**
6983 * Hook for preparing to use the host FPU for SSE.
6984 *
6985 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6986 *
6987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6988 */
6989DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6990{
6991 iemFpuPrepareUsage(pVCpu);
6992}
6993
6994
6995/**
6996 * Hook for preparing to use the host FPU for AVX.
6997 *
6998 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6999 *
7000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7001 */
7002DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7003{
7004 iemFpuPrepareUsage(pVCpu);
7005}
7006
7007
7008/**
7009 * Hook for actualizing the guest FPU state before the interpreter reads it.
7010 *
7011 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7012 *
7013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7014 */
7015DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7016{
7017#ifdef IN_RING3
7018 NOREF(pVCpu);
7019#else
7020 CPUMRZFpuStateActualizeForRead(pVCpu);
7021#endif
7022 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7023}
7024
7025
7026/**
7027 * Hook for actualizing the guest FPU state before the interpreter changes it.
7028 *
7029 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7030 *
7031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7032 */
7033DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7034{
7035#ifdef IN_RING3
7036 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7037#else
7038 CPUMRZFpuStateActualizeForChange(pVCpu);
7039#endif
7040 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7041}
7042
7043
7044/**
7045 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7046 * only.
7047 *
7048 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7049 *
7050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7051 */
7052DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7053{
7054#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7055 NOREF(pVCpu);
7056#else
7057 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7058#endif
7059 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7060}
7061
7062
7063/**
7064 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7065 * read+write.
7066 *
7067 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7068 *
7069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7070 */
7071DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7072{
7073#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7074 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7075#else
7076 CPUMRZFpuStateActualizeForChange(pVCpu);
7077#endif
7078 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7079}
7080
7081
7082/**
7083 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7084 * only.
7085 *
7086 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7087 *
7088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7089 */
7090DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7091{
7092#ifdef IN_RING3
7093 NOREF(pVCpu);
7094#else
7095 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7096#endif
7097 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7098}
7099
7100
7101/**
7102 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7103 * read+write.
7104 *
7105 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7106 *
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 */
7109DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7110{
7111#ifdef IN_RING3
7112 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7113#else
7114 CPUMRZFpuStateActualizeForChange(pVCpu);
7115#endif
7116 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7117}
7118
7119
7120/**
7121 * Stores a QNaN value into a FPU register.
7122 *
7123 * @param pReg Pointer to the register.
7124 */
7125DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7126{
7127 pReg->au32[0] = UINT32_C(0x00000000);
7128 pReg->au32[1] = UINT32_C(0xc0000000);
7129 pReg->au16[4] = UINT16_C(0xffff);
7130}
7131
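/*
 * The pattern stored above is the 80-bit "real indefinite" QNaN: au16[4]
 * supplies sign=1 and exponent=0x7FFF, au32[1]=0xC0000000 sets the integer
 * bit plus the top fraction bit, and the rest is zero, giving the value
 * FFFF'C0000000'00000000 which the FPU itself produces for masked invalid
 * operations.
 */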
7132
7133/**
7134 * Updates the FOP, FPU.CS and FPUIP registers.
7135 *
7136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7137 * @param pFpuCtx The FPU context.
7138 */
7139DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7140{
7141 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7142 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7143    /** @todo x87.CS and FPUIP need to be kept separately. */
7144 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7145 {
7146        /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
7147         *        handled in real mode, based on the fnsave and fnstenv images. */
7148 pFpuCtx->CS = 0;
7149 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7150 }
7151 else
7152 {
7153 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7154 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7155 }
7156}
7157
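/*
 * Worked example of the real/V86-mode branch above (hypothetical values):
 * CS=0x1234 and IP=0x0010 give FPUIP = 0x0010 | (0x1234 << 4) = 0x12350,
 * i.e. the linear address of the instruction, with the CS field zeroed as
 * per the assumption documented in the @todo above.
 */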
7158
7159/**
7160 * Updates the x87.DS and FPUDP registers.
7161 *
7162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7163 * @param pFpuCtx The FPU context.
7164 * @param iEffSeg The effective segment register.
7165 * @param GCPtrEff The effective address relative to @a iEffSeg.
7166 */
7167DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7168{
7169 RTSEL sel;
7170 switch (iEffSeg)
7171 {
7172 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7173 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7174 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7175 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7176 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7177 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7178 default:
7179 AssertMsgFailed(("%d\n", iEffSeg));
7180 sel = pVCpu->cpum.GstCtx.ds.Sel;
7181 }
7182    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7183 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7184 {
7185 pFpuCtx->DS = 0;
7186 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7187 }
7188 else
7189 {
7190 pFpuCtx->DS = sel;
7191 pFpuCtx->FPUDP = GCPtrEff;
7192 }
7193}
7194
7195
7196/**
7197 * Rotates the stack registers in the push direction.
7198 *
7199 * @param pFpuCtx The FPU context.
7200 * @remarks This is a complete waste of time, but fxsave stores the registers in
7201 * stack order.
7202 */
7203DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7204{
7205 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7206 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7207 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7208 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7209 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7210 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7211 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7212 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7213 pFpuCtx->aRegs[0].r80 = r80Tmp;
7214}
7215
7216
7217/**
7218 * Rotates the stack registers in the pop direction.
7219 *
7220 * @param pFpuCtx The FPU context.
7221 * @remarks This is a complete waste of time, but fxsave stores the registers in
7222 * stack order.
7223 */
7224DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7225{
7226 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7227 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7228 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7229 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7230 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7231 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7232 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7233 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7234 pFpuCtx->aRegs[7].r80 = r80Tmp;
7235}
7236
7237
7238/**
7239 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7240 * exception prevents it.
7241 *
7242 * @param pResult The FPU operation result to push.
7243 * @param pFpuCtx The FPU context.
7244 */
7245IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7246{
7247 /* Update FSW and bail if there are pending exceptions afterwards. */
7248 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7249 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7250 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7251 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7252 {
7253 pFpuCtx->FSW = fFsw;
7254 return;
7255 }
7256
7257 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7258 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7259 {
7260 /* All is fine, push the actual value. */
7261 pFpuCtx->FTW |= RT_BIT(iNewTop);
7262 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7263 }
7264 else if (pFpuCtx->FCW & X86_FCW_IM)
7265 {
7266 /* Masked stack overflow, push QNaN. */
7267 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7268 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7269 }
7270 else
7271 {
7272 /* Raise stack overflow, don't push anything. */
7273 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7274 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7275 return;
7276 }
7277
7278 fFsw &= ~X86_FSW_TOP_MASK;
7279 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7280 pFpuCtx->FSW = fFsw;
7281
7282 iemFpuRotateStackPush(pFpuCtx);
7283}
7284
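/*
 * Note on the TOP arithmetic above: (X86_FSW_TOP_GET(fFsw) + 7) masked with
 * X86_FSW_TOP_SMASK is TOP - 1 modulo 8, i.e. a push decrements TOP.  The
 * value is written to aRegs[7] because aRegs[] is kept in ST() order; the
 * final iemFpuRotateStackPush turns that slot into the new ST(0).
 */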
7285
7286/**
7287 * Stores a result in a FPU register and updates the FSW and FTW.
7288 *
7289 * @param pFpuCtx The FPU context.
7290 * @param pResult The result to store.
7291 * @param iStReg Which FPU register to store it in.
7292 */
7293IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7294{
7295 Assert(iStReg < 8);
7296 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7297 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7298 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7299 pFpuCtx->FTW |= RT_BIT(iReg);
7300 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7301}
7302
7303
7304/**
7305 * Only updates the FPU status word (FSW) with the result of the current
7306 * instruction.
7307 *
7308 * @param pFpuCtx The FPU context.
7309 * @param u16FSW The FSW output of the current instruction.
7310 */
7311IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7312{
7313 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7314 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7315}
7316
7317
7318/**
7319 * Pops one item off the FPU stack if no pending exception prevents it.
7320 *
7321 * @param pFpuCtx The FPU context.
7322 */
7323IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7324{
7325 /* Check pending exceptions. */
7326 uint16_t uFSW = pFpuCtx->FSW;
7327 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7328 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7329 return;
7330
7331    /* TOP++ - adding 9 modulo 8 is +1, i.e. pop one item. */
7332 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7333 uFSW &= ~X86_FSW_TOP_MASK;
7334 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7335 pFpuCtx->FSW = uFSW;
7336
7337 /* Mark the previous ST0 as empty. */
7338 iOldTop >>= X86_FSW_TOP_SHIFT;
7339 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7340
7341 /* Rotate the registers. */
7342 iemFpuRotateStackPop(pFpuCtx);
7343}
7344
7345
7346/**
7347 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7348 *
7349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7350 * @param pResult The FPU operation result to push.
7351 */
7352IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7353{
7354 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7356 iemFpuMaybePushResult(pResult, pFpuCtx);
7357}
7358
7359
7360/**
7361 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7362 * and sets FPUDP and FPUDS.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The FPU operation result to push.
7366 * @param iEffSeg The effective segment register.
7367 * @param GCPtrEff The effective address relative to @a iEffSeg.
7368 */
7369IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7370{
7371 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7372 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7373 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7374 iemFpuMaybePushResult(pResult, pFpuCtx);
7375}
7376
7377
7378/**
7379 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7380 * unless a pending exception prevents it.
7381 *
7382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7383 * @param pResult The FPU operation result to store and push.
7384 */
7385IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7388 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7389
7390 /* Update FSW and bail if there are pending exceptions afterwards. */
7391 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7392 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7393 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7394 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7395 {
7396 pFpuCtx->FSW = fFsw;
7397 return;
7398 }
7399
7400 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7401 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7402 {
7403 /* All is fine, push the actual value. */
7404 pFpuCtx->FTW |= RT_BIT(iNewTop);
7405 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7406 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7407 }
7408 else if (pFpuCtx->FCW & X86_FCW_IM)
7409 {
7410 /* Masked stack overflow, push QNaN. */
7411 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7412 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7413 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7414 }
7415 else
7416 {
7417 /* Raise stack overflow, don't push anything. */
7418 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7419 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7420 return;
7421 }
7422
7423 fFsw &= ~X86_FSW_TOP_MASK;
7424 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7425 pFpuCtx->FSW = fFsw;
7426
7427 iemFpuRotateStackPush(pFpuCtx);
7428}
7429
7430
7431/**
7432 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7433 * FOP.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 * @param pResult The result to store.
7437 * @param iStReg Which FPU register to store it in.
7438 */
7439IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7440{
7441 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7442 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7443 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7444}
7445
7446
7447/**
7448 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7449 * FOP, and then pops the stack.
7450 *
7451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7452 * @param pResult The result to store.
7453 * @param iStReg Which FPU register to store it in.
7454 */
7455IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7456{
7457 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7458 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7459 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7460 iemFpuMaybePopOne(pFpuCtx);
7461}
7462
7463
7464/**
7465 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7466 * FPUDP, and FPUDS.
7467 *
7468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7469 * @param pResult The result to store.
7470 * @param iStReg Which FPU register to store it in.
7471 * @param iEffSeg The effective memory operand selector register.
7472 * @param GCPtrEff The effective memory operand offset.
7473 */
7474IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7475 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7476{
7477 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7478 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7479 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7480 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7481}
7482
7483
7484/**
7485 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7486 * FPUDP, and FPUDS, and then pops the stack.
7487 *
7488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7489 * @param pResult The result to store.
7490 * @param iStReg Which FPU register to store it in.
7491 * @param iEffSeg The effective memory operand selector register.
7492 * @param GCPtrEff The effective memory operand offset.
7493 */
7494IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7495 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7496{
7497 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7498 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7500 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7501 iemFpuMaybePopOne(pFpuCtx);
7502}
7503
7504
7505/**
7506 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 */
7510IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7511{
7512 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7513 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7514}
7515
7516
7517/**
7518 * Marks the specified stack register as free (for FFREE).
7519 *
7520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7521 * @param iStReg The register to free.
7522 */
7523IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7524{
7525 Assert(iStReg < 8);
7526 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7527 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7528 pFpuCtx->FTW &= ~RT_BIT(iReg);
7529}
7530
7531
7532/**
7533 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7534 *
7535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7536 */
7537IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7538{
7539 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7540 uint16_t uFsw = pFpuCtx->FSW;
7541 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7542 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7543 uFsw &= ~X86_FSW_TOP_MASK;
7544 uFsw |= uTop;
7545 pFpuCtx->FSW = uFsw;
7546}
7547
7548
7549/**
7550 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 */
7554IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7555{
7556 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7557 uint16_t uFsw = pFpuCtx->FSW;
7558 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7559 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7560 uFsw &= ~X86_FSW_TOP_MASK;
7561 uFsw |= uTop;
7562 pFpuCtx->FSW = uFsw;
7563}
7564
7565
7566/**
7567 * Updates the FSW, FOP, FPUIP, and FPUCS.
7568 *
7569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7570 * @param u16FSW The FSW from the current instruction.
7571 */
7572IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7573{
7574 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7575 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7576 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7577}
7578
7579
7580/**
7581 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param u16FSW The FSW from the current instruction.
7585 */
7586IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7587{
7588 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7589 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7590 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7591 iemFpuMaybePopOne(pFpuCtx);
7592}
7593
7594
7595/**
7596 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7597 *
7598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7599 * @param u16FSW The FSW from the current instruction.
7600 * @param iEffSeg The effective memory operand selector register.
7601 * @param GCPtrEff The effective memory operand offset.
7602 */
7603IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7604{
7605 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7606 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7607 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7608 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7609}
7610
7611
7612/**
7613 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7614 *
7615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7616 * @param u16FSW The FSW from the current instruction.
7617 */
7618IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7619{
7620 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7621 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7622 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7623 iemFpuMaybePopOne(pFpuCtx);
7624 iemFpuMaybePopOne(pFpuCtx);
7625}
7626
7627
7628/**
7629 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7630 *
7631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7632 * @param u16FSW The FSW from the current instruction.
7633 * @param iEffSeg The effective memory operand selector register.
7634 * @param GCPtrEff The effective memory operand offset.
7635 */
7636IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7637{
7638 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7639 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7640 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7641 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7642 iemFpuMaybePopOne(pFpuCtx);
7643}
7644
7645
7646/**
7647 * Worker routine for raising an FPU stack underflow exception.
7648 *
7649 * @param pFpuCtx The FPU context.
7650 * @param iStReg The stack register being accessed.
7651 */
7652IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7653{
7654 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7655 if (pFpuCtx->FCW & X86_FCW_IM)
7656 {
7657 /* Masked underflow. */
7658 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7659 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7660 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7661 if (iStReg != UINT8_MAX)
7662 {
7663 pFpuCtx->FTW |= RT_BIT(iReg);
7664 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7665 }
7666 }
7667 else
7668 {
7669 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7670 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7671 }
7672}
7673
7674
7675/**
7676 * Raises a FPU stack underflow exception.
7677 *
7678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7679 * @param iStReg The destination register that should be loaded
7680 * with QNaN if \#IS is not masked. Specify
7681 * UINT8_MAX if none (like for fcom).
7682 */
7683DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7684{
7685 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7686 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7687 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7688}
7689
7690
7691DECL_NO_INLINE(IEM_STATIC, void)
7692iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7693{
7694 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7695 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7696 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7697 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7698}
7699
7700
7701DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7702{
7703 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7704 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7705 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7706 iemFpuMaybePopOne(pFpuCtx);
7707}
7708
7709
7710DECL_NO_INLINE(IEM_STATIC, void)
7711iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7712{
7713 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7714 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7715 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7716 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7717 iemFpuMaybePopOne(pFpuCtx);
7718}
7719
7720
7721DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7722{
7723 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7724 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7725 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7726 iemFpuMaybePopOne(pFpuCtx);
7727 iemFpuMaybePopOne(pFpuCtx);
7728}
7729
7730
7731DECL_NO_INLINE(IEM_STATIC, void)
7732iemFpuStackPushUnderflow(PVMCPU pVCpu)
7733{
7734 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7735 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7736
7737 if (pFpuCtx->FCW & X86_FCW_IM)
7738 {
7739 /* Masked underflow - Push QNaN. */
7740 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7741 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7742 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7743 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7744 pFpuCtx->FTW |= RT_BIT(iNewTop);
7745 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7746 iemFpuRotateStackPush(pFpuCtx);
7747 }
7748 else
7749 {
7750 /* Exception pending - don't change TOP or the register stack. */
7751 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7752 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7753 }
7754}
7755
7756
7757DECL_NO_INLINE(IEM_STATIC, void)
7758iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7759{
7760 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7761 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7762
7763 if (pFpuCtx->FCW & X86_FCW_IM)
7764 {
7765 /* Masked underflow - Push QNaN. */
7766 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7767 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7768 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7769 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7770 pFpuCtx->FTW |= RT_BIT(iNewTop);
7771 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7772 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7773 iemFpuRotateStackPush(pFpuCtx);
7774 }
7775 else
7776 {
7777 /* Exception pending - don't change TOP or the register stack. */
7778 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7779 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7780 }
7781}
7782
7783
7784/**
7785 * Worker routine for raising an FPU stack overflow exception on a push.
7786 *
7787 * @param pFpuCtx The FPU context.
7788 */
7789IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7790{
7791 if (pFpuCtx->FCW & X86_FCW_IM)
7792 {
7793 /* Masked overflow. */
7794 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7795 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7796 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7797 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7798 pFpuCtx->FTW |= RT_BIT(iNewTop);
7799 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7800 iemFpuRotateStackPush(pFpuCtx);
7801 }
7802 else
7803 {
7804 /* Exception pending - don't change TOP or the register stack. */
7805 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7806 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7807 }
7808}
7809
7810
7811/**
7812 * Raises an FPU stack overflow exception on a push.
7813 *
7814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7815 */
7816DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7817{
7818 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7819 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7820 iemFpuStackPushOverflowOnly(pFpuCtx);
7821}
7822
7823
7824/**
7825 * Raises an FPU stack overflow exception on a push with a memory operand.
7826 *
7827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7828 * @param iEffSeg The effective memory operand selector register.
7829 * @param GCPtrEff The effective memory operand offset.
7830 */
7831DECL_NO_INLINE(IEM_STATIC, void)
7832iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7833{
7834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7835 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7836 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7837 iemFpuStackPushOverflowOnly(pFpuCtx);
7838}
7839
7840
7841IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7842{
7843 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7844 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7845 if (pFpuCtx->FTW & RT_BIT(iReg))
7846 return VINF_SUCCESS;
7847 return VERR_NOT_FOUND;
7848}
7849
7850
7851IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7852{
7853 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7854 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7855 if (pFpuCtx->FTW & RT_BIT(iReg))
7856 {
7857 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7858 return VINF_SUCCESS;
7859 }
7860 return VERR_NOT_FOUND;
7861}
7862
7863
7864IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7865 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7866{
7867 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7868 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7869 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7870 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7871 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7872 {
7873 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7874 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7875 return VINF_SUCCESS;
7876 }
7877 return VERR_NOT_FOUND;
7878}
7879
7880
7881IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7882{
7883 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7884 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7885 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7886 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7887 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7888 {
7889 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7890 return VINF_SUCCESS;
7891 }
7892 return VERR_NOT_FOUND;
7893}
7894
7895
7896/**
7897 * Updates the FPU exception status after FCW is changed.
7898 *
7899 * @param pFpuCtx The FPU context.
7900 */
7901IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7902{
7903 uint16_t u16Fsw = pFpuCtx->FSW;
7904 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7905 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7906 else
7907 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7908 pFpuCtx->FSW = u16Fsw;
7909}
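
/*
 * Worked example, with illustrative register values: a pending #IE that FLDCW
 * has just unmasked must be reflected in the summary bits, so ES and B get
 * set:
 *
 *     pFpuCtx->FCW = 0x037e;                 // hypothetical: IM cleared, the rest masked
 *     pFpuCtx->FSW = 0x0001;                 // IE pending
 *     iemFpuRecalcExceptionStatus(pFpuCtx);
 *     // FSW == 0x8081: X86_FSW_ES and X86_FSW_B are now set alongside IE.
 */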
7910
7911
7912/**
7913 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7914 *
7915 * @returns The full FTW.
7916 * @param pFpuCtx The FPU context.
7917 */
7918IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7919{
7920 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7921 uint16_t u16Ftw = 0;
7922 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7923 for (unsigned iSt = 0; iSt < 8; iSt++)
7924 {
7925 unsigned const iReg = (iSt + iTop) & 7;
7926 if (!(u8Ftw & RT_BIT(iReg)))
7927 u16Ftw |= 3 << (iReg * 2); /* empty */
7928 else
7929 {
7930 uint16_t uTag;
7931 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7932 if (pr80Reg->s.uExponent == 0x7fff)
7933 uTag = 2; /* Exponent is all 1's => Special. */
7934 else if (pr80Reg->s.uExponent == 0x0000)
7935 {
7936 if (pr80Reg->s.u64Mantissa == 0x0000)
7937 uTag = 1; /* All bits are zero => Zero. */
7938 else
7939 uTag = 2; /* Must be special. */
7940 }
7941 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7942 uTag = 0; /* Valid. */
7943 else
7944 uTag = 2; /* Must be special. */
7945
7946 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7947 }
7948 }
7949
7950 return u16Ftw;
7951}
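
/*
 * Worked example (hypothetical x87 state): with TOP = 7, an abridged FTW of
 * 0x80 (only physical register 7 in use) and ST(0) holding 1.0 (exponent
 * 0x3fff, J bit set), the expansion tags register 7 as valid (00) and
 * everything else as empty (11):
 *
 *     uint16_t const u16FullFtw = iemFpuCalcFullFtw(pFpuCtx);
 *     // u16FullFtw == 0x3fff: bits 15:14 are 00, all lower tag pairs are 11.
 */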
7952
7953
7954/**
7955 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7956 *
7957 * @returns The compressed FTW.
7958 * @param u16FullFtw The full FTW to convert.
7959 */
7960IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7961{
7962 uint8_t u8Ftw = 0;
7963 for (unsigned i = 0; i < 8; i++)
7964 {
7965 if ((u16FullFtw & 3) != 3 /*empty*/)
7966 u8Ftw |= RT_BIT(i);
7967 u16FullFtw >>= 2;
7968 }
7969
7970 return u8Ftw;
7971}
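
/*
 * Worked example: compressing the full tag word from the example above gets
 * back the one-bit-per-register form, with only physical register 7 marked
 * as used:
 *
 *     uint16_t const u16Compressed = iemFpuCompressFtw(UINT16_C(0x3fff));
 *     // u16Compressed == 0x80: only tag pair 7 differs from 11b (empty).
 */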
7972
7973/** @} */
7974
7975
7976/** @name Memory access.
7977 *
7978 * @{
7979 */
7980
7981
7982/**
7983 * Updates the IEMCPU::cbWritten counter if applicable.
7984 *
7985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7986 * @param fAccess The access being accounted for.
7987 * @param cbMem The access size.
7988 */
7989DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7990{
7991 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7992 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7993 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7994}
7995
7996
7997/**
7998 * Checks if the given segment can be written to, raising the appropriate
7999 * exception if not.
8000 *
8001 * @returns VBox strict status code.
8002 *
8003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8004 * @param pHid Pointer to the hidden register.
8005 * @param iSegReg The register number.
8006 * @param pu64BaseAddr Where to return the base address to use for the
8007 * segment. (In 64-bit code it may differ from the
8008 * base in the hidden segment.)
8009 */
8010IEM_STATIC VBOXSTRICTRC
8011iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8012{
8013 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8014
8015 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8016 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8017 else
8018 {
8019 if (!pHid->Attr.n.u1Present)
8020 {
8021 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8022 AssertRelease(uSel == 0);
8023 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8024 return iemRaiseGeneralProtectionFault0(pVCpu);
8025 }
8026
8027 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8028 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8029 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8030 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8031 *pu64BaseAddr = pHid->u64Base;
8032 }
8033 return VINF_SUCCESS;
8034}
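
/*
 * Usage sketch of the kind the string instruction helpers rely on; the
 * segment choice is illustrative only. The hidden ES register is validated
 * for writing once, before entering the inner loop:
 *
 *     uint64_t uBaseAddr;
 *     VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegGetHid(pVCpu, X86_SREG_ES),
 *                                                         X86_SREG_ES, &uBaseAddr);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;                   // the helper has already raised #GP
 */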
8035
8036
8037/**
8038 * Checks if the given segment can be read from, raising the appropriate
8039 * exception if not.
8040 *
8041 * @returns VBox strict status code.
8042 *
8043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8044 * @param pHid Pointer to the hidden register.
8045 * @param iSegReg The register number.
8046 * @param pu64BaseAddr Where to return the base address to use for the
8047 * segment. (In 64-bit code it may differ from the
8048 * base in the hidden segment.)
8049 */
8050IEM_STATIC VBOXSTRICTRC
8051iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8052{
8053 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8054
8055 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8056 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8057 else
8058 {
8059 if (!pHid->Attr.n.u1Present)
8060 {
8061 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8062 AssertRelease(uSel == 0);
8063 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8064 return iemRaiseGeneralProtectionFault0(pVCpu);
8065 }
8066
8067 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8068 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8069 *pu64BaseAddr = pHid->u64Base;
8070 }
8071 return VINF_SUCCESS;
8072}
8073
8074
8075/**
8076 * Applies the segment limit, base and attributes.
8077 *
8078 * This may raise a \#GP or \#SS.
8079 *
8080 * @returns VBox strict status code.
8081 *
8082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8083 * @param fAccess The kind of access which is being performed.
8084 * @param iSegReg The index of the segment register to apply.
8085 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8086 * TSS, ++).
8087 * @param cbMem The access size.
8088 * @param pGCPtrMem Pointer to the guest memory address to apply
8089 * segmentation to. Input and output parameter.
8090 */
8091IEM_STATIC VBOXSTRICTRC
8092iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8093{
8094 if (iSegReg == UINT8_MAX)
8095 return VINF_SUCCESS;
8096
8097 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8098 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8099 switch (pVCpu->iem.s.enmCpuMode)
8100 {
8101 case IEMMODE_16BIT:
8102 case IEMMODE_32BIT:
8103 {
8104 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8105 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8106
8107 if ( pSel->Attr.n.u1Present
8108 && !pSel->Attr.n.u1Unusable)
8109 {
8110 Assert(pSel->Attr.n.u1DescType);
8111 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8112 {
8113 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8114 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8115 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8116
8117 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8118 {
8119 /** @todo CPL check. */
8120 }
8121
8122 /*
8123 * There are two kinds of data selectors, normal and expand down.
8124 */
8125 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8126 {
8127 if ( GCPtrFirst32 > pSel->u32Limit
8128 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8129 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8130 }
8131 else
8132 {
8133 /*
8134 * The upper boundary is defined by the B bit, not the G bit!
8135 */
8136 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8137 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8138 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8139 }
8140 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8141 }
8142 else
8143 {
8144
8145 /*
8146 * Code selectors can usually be used to read through; writing is
8147 * only permitted in real and V8086 mode.
8148 */
8149 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8150 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8151 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8152 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8153 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8154
8155 if ( GCPtrFirst32 > pSel->u32Limit
8156 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8157 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8158
8159 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8160 {
8161 /** @todo CPL check. */
8162 }
8163
8164 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8165 }
8166 }
8167 else
8168 return iemRaiseGeneralProtectionFault0(pVCpu);
8169 return VINF_SUCCESS;
8170 }
8171
8172 case IEMMODE_64BIT:
8173 {
8174 RTGCPTR GCPtrMem = *pGCPtrMem;
8175 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8176 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8177
8178 Assert(cbMem >= 1);
8179 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8180 return VINF_SUCCESS;
8181 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8182 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8183 return iemRaiseGeneralProtectionFault0(pVCpu);
8184 }
8185
8186 default:
8187 AssertFailedReturn(VERR_IEM_IPE_7);
8188 }
8189}
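
/*
 * Minimal usage sketch for the helper above; the effective address and
 * operand size are made up for illustration:
 *
 *     RTGCPTR GCPtrEff = 0x1000;             // hypothetical DS-relative offset
 *     VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                sizeof(uint32_t), &GCPtrEff);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;                   // #GP/#SS already raised
 *     // GCPtrEff now holds the linear address (segment base added in 16/32-bit mode).
 */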
8190
8191
8192/**
8193 * Translates a virtual address to a physical address and checks if we
8194 * can access the page as specified.
8195 *
8196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8197 * @param GCPtrMem The virtual address.
8198 * @param fAccess The intended access.
8199 * @param pGCPhysMem Where to return the physical address.
8200 */
8201IEM_STATIC VBOXSTRICTRC
8202iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8203{
8204 /** @todo Need a different PGM interface here. We're currently using
8205 * generic / REM interfaces. This won't cut it for R0 & RC. */
8206 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8207 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8208 RTGCPHYS GCPhys;
8209 uint64_t fFlags;
8210 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8211 if (RT_FAILURE(rc))
8212 {
8213 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8214 /** @todo Check unassigned memory in unpaged mode. */
8215 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8216 *pGCPhysMem = NIL_RTGCPHYS;
8217 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8218 }
8219
8220 /* If the page is writable and does not have the no-exec bit set, all
8221 access is allowed. Otherwise we'll have to check more carefully... */
8222 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8223 {
8224 /* Write to read only memory? */
8225 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8226 && !(fFlags & X86_PTE_RW)
8227 && ( (pVCpu->iem.s.uCpl == 3
8228 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8229 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8230 {
8231 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8232 *pGCPhysMem = NIL_RTGCPHYS;
8233 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8234 }
8235
8236 /* Kernel memory accessed by userland? */
8237 if ( !(fFlags & X86_PTE_US)
8238 && pVCpu->iem.s.uCpl == 3
8239 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8240 {
8241 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8242 *pGCPhysMem = NIL_RTGCPHYS;
8243 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8244 }
8245
8246 /* Executing non-executable memory? */
8247 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8248 && (fFlags & X86_PTE_PAE_NX)
8249 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8250 {
8251 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8252 *pGCPhysMem = NIL_RTGCPHYS;
8253 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8254 VERR_ACCESS_DENIED);
8255 }
8256 }
8257
8258 /*
8259 * Set the dirty / access flags.
8260 * ASSUMES this is set when the address is translated rather than on commit...
8261 */
8262 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8263 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8264 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8265 {
8266 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8267 AssertRC(rc2);
8268 }
8269
8270 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8271 *pGCPhysMem = GCPhys;
8272 return VINF_SUCCESS;
8273}
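
/*
 * Usage sketch; the linear address is assumed to come from the caller. The
 * helper raises #PF itself, so the caller only passes the status up:
 *
 *     RTGCPHYS GCPhys;
 *     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                               IEM_ACCESS_DATA_W, &GCPhys);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // GCPhys now holds the guest-physical address, page offset included.
 */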
8274
8275
8276
8277/**
8278 * Maps a physical page.
8279 *
8280 * @returns VBox status code (see PGMPhysIemGCPhys2Ptr).
8281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8282 * @param GCPhysMem The physical address.
8283 * @param fAccess The intended access.
8284 * @param ppvMem Where to return the mapping address.
8285 * @param pLock The PGM lock.
8286 */
8287IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8288{
8289#ifdef IEM_LOG_MEMORY_WRITES
8290 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8291 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8292#endif
8293
8294 /** @todo This API may require some improving later. A private deal with PGM
8295 * regarding locking and unlocking needs to be struck. A couple of TLBs
8296 * living in PGM, but with publicly accessible inlined access methods
8297 * could perhaps be an even better solution. */
8298 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8299 GCPhysMem,
8300 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8301 pVCpu->iem.s.fBypassHandlers,
8302 ppvMem,
8303 pLock);
8304 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8305 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8306
8307 return rc;
8308}
8309
8310
8311/**
8312 * Unmap a page previously mapped by iemMemPageMap.
8313 *
8314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8315 * @param GCPhysMem The physical address.
8316 * @param fAccess The intended access.
8317 * @param pvMem What iemMemPageMap returned.
8318 * @param pLock The PGM lock.
8319 */
8320DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8321{
8322 NOREF(pVCpu);
8323 NOREF(GCPhysMem);
8324 NOREF(fAccess);
8325 NOREF(pvMem);
8326 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8327}
8328
8329
8330/**
8331 * Looks up a memory mapping entry.
8332 *
8333 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8335 * @param pvMem The memory address.
8336 * @param fAccess The access flags the mapping was made with.
8337 */
8338DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8339{
8340 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8341 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8342 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8343 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8344 return 0;
8345 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8346 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8347 return 1;
8348 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8349 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8350 return 2;
8351 return VERR_NOT_FOUND;
8352}
8353
8354
8355/**
8356 * Finds a free memmap entry when iNextMapping cannot be used.
8357 *
8358 * @returns Memory mapping index, 1024 on failure.
8359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8360 */
8361IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8362{
8363 /*
8364 * The easy case.
8365 */
8366 if (pVCpu->iem.s.cActiveMappings == 0)
8367 {
8368 pVCpu->iem.s.iNextMapping = 1;
8369 return 0;
8370 }
8371
8372 /* There should be enough mappings for all instructions. */
8373 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8374
8375 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8376 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8377 return i;
8378
8379 AssertFailedReturn(1024);
8380}
8381
8382
8383/**
8384 * Commits a bounce buffer that needs writing back and unmaps it.
8385 *
8386 * @returns Strict VBox status code.
8387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8388 * @param iMemMap The index of the buffer to commit.
8389 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8390 * Always false in ring-3, obviously.
8391 */
8392IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8393{
8394 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8395 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8396#ifdef IN_RING3
8397 Assert(!fPostponeFail);
8398 RT_NOREF_PV(fPostponeFail);
8399#endif
8400
8401 /*
8402 * Do the writing.
8403 */
8404 PVM pVM = pVCpu->CTX_SUFF(pVM);
8405 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8406 {
8407 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8408 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8409 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8410 if (!pVCpu->iem.s.fBypassHandlers)
8411 {
8412 /*
8413 * Carefully and efficiently dealing with access handler return
8414 * codes make this a little bloated.
8415 */
8416 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8418 pbBuf,
8419 cbFirst,
8420 PGMACCESSORIGIN_IEM);
8421 if (rcStrict == VINF_SUCCESS)
8422 {
8423 if (cbSecond)
8424 {
8425 rcStrict = PGMPhysWrite(pVM,
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8427 pbBuf + cbFirst,
8428 cbSecond,
8429 PGMACCESSORIGIN_IEM);
8430 if (rcStrict == VINF_SUCCESS)
8431 { /* nothing */ }
8432 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8433 {
8434 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8437 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8438 }
8439#ifndef IN_RING3
8440 else if (fPostponeFail)
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8445 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8446 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8447 return iemSetPassUpStatus(pVCpu, rcStrict);
8448 }
8449#endif
8450 else
8451 {
8452 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8455 return rcStrict;
8456 }
8457 }
8458 }
8459 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8460 {
8461 if (!cbSecond)
8462 {
8463 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8465 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8466 }
8467 else
8468 {
8469 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8471 pbBuf + cbFirst,
8472 cbSecond,
8473 PGMACCESSORIGIN_IEM);
8474 if (rcStrict2 == VINF_SUCCESS)
8475 {
8476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8479 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8480 }
8481 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8482 {
8483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8486 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8488 }
8489#ifndef IN_RING3
8490 else if (fPostponeFail)
8491 {
8492 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8495 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8496 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8497 return iemSetPassUpStatus(pVCpu, rcStrict);
8498 }
8499#endif
8500 else
8501 {
8502 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8505 return rcStrict2;
8506 }
8507 }
8508 }
8509#ifndef IN_RING3
8510 else if (fPostponeFail)
8511 {
8512 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8515 if (!cbSecond)
8516 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8517 else
8518 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8519 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8520 return iemSetPassUpStatus(pVCpu, rcStrict);
8521 }
8522#endif
8523 else
8524 {
8525 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8527 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8528 return rcStrict;
8529 }
8530 }
8531 else
8532 {
8533 /*
8534 * No access handlers, much simpler.
8535 */
8536 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8537 if (RT_SUCCESS(rc))
8538 {
8539 if (cbSecond)
8540 {
8541 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8542 if (RT_SUCCESS(rc))
8543 { /* likely */ }
8544 else
8545 {
8546 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8549 return rc;
8550 }
8551 }
8552 }
8553 else
8554 {
8555 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8556 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8557 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8558 return rc;
8559 }
8560 }
8561 }
8562
8563#if defined(IEM_LOG_MEMORY_WRITES)
8564 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8565 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8566 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8567 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8568 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8569 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8570
8571 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8572 g_cbIemWrote = cbWrote;
8573 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8574#endif
8575
8576 /*
8577 * Free the mapping entry.
8578 */
8579 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8580 Assert(pVCpu->iem.s.cActiveMappings != 0);
8581 pVCpu->iem.s.cActiveMappings--;
8582 return VINF_SUCCESS;
8583}
8584
8585
8586/**
8587 * iemMemMap worker that deals with a request crossing pages.
8588 */
8589IEM_STATIC VBOXSTRICTRC
8590iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8591{
8592 /*
8593 * Do the address translations.
8594 */
8595 RTGCPHYS GCPhysFirst;
8596 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8597 if (rcStrict != VINF_SUCCESS)
8598 return rcStrict;
8599
8600 RTGCPHYS GCPhysSecond;
8601 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8602 fAccess, &GCPhysSecond);
8603 if (rcStrict != VINF_SUCCESS)
8604 return rcStrict;
8605 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8606
8607 PVM pVM = pVCpu->CTX_SUFF(pVM);
8608
8609 /*
8610 * Read in the current memory content if it's a read, execute or partial
8611 * write access.
8612 */
8613 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8614 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8615 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8616
8617 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8618 {
8619 if (!pVCpu->iem.s.fBypassHandlers)
8620 {
8621 /*
8622 * Must carefully deal with access handler status codes here,
8623 * which makes the code a bit bloated.
8624 */
8625 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8626 if (rcStrict == VINF_SUCCESS)
8627 {
8628 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8629 if (rcStrict == VINF_SUCCESS)
8630 { /*likely */ }
8631 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8632 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8633 else
8634 {
8635 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8636 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8637 return rcStrict;
8638 }
8639 }
8640 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8641 {
8642 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8643 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8644 {
8645 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8646 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8647 }
8648 else
8649 {
8650 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8651 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
8652 return rcStrict2;
8653 }
8654 }
8655 else
8656 {
8657 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8658 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8659 return rcStrict;
8660 }
8661 }
8662 else
8663 {
8664 /*
8665 * No informational status codes here, much more straightforward.
8666 */
8667 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8668 if (RT_SUCCESS(rc))
8669 {
8670 Assert(rc == VINF_SUCCESS);
8671 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8672 if (RT_SUCCESS(rc))
8673 Assert(rc == VINF_SUCCESS);
8674 else
8675 {
8676 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8677 return rc;
8678 }
8679 }
8680 else
8681 {
8682 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8683 return rc;
8684 }
8685 }
8686 }
8687#ifdef VBOX_STRICT
8688 else
8689 memset(pbBuf, 0xcc, cbMem);
8690 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8691 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8692#endif
8693
8694 /*
8695 * Commit the bounce buffer entry.
8696 */
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8699 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8700 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8701 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8702 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8703 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8704 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8705 pVCpu->iem.s.cActiveMappings++;
8706
8707 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8708 *ppvMem = pbBuf;
8709 return VINF_SUCCESS;
8710}
8711
8712
8713/**
8714 * iemMemMap worker that deals with iemMemPageMap failures.
8715 */
8716IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8717 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8718{
8719 /*
8720 * Filter out conditions we can handle and the ones which shouldn't happen.
8721 */
8722 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8723 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8724 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8725 {
8726 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8727 return rcMap;
8728 }
8729 pVCpu->iem.s.cPotentialExits++;
8730
8731 /*
8732 * Read in the current memory content if it's a read, execute or partial
8733 * write access.
8734 */
8735 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8736 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8737 {
8738 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8739 memset(pbBuf, 0xff, cbMem);
8740 else
8741 {
8742 int rc;
8743 if (!pVCpu->iem.s.fBypassHandlers)
8744 {
8745 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8746 if (rcStrict == VINF_SUCCESS)
8747 { /* nothing */ }
8748 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8749 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8750 else
8751 {
8752 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8753 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8754 return rcStrict;
8755 }
8756 }
8757 else
8758 {
8759 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8760 if (RT_SUCCESS(rc))
8761 { /* likely */ }
8762 else
8763 {
8764 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8765 GCPhysFirst, rc));
8766 return rc;
8767 }
8768 }
8769 }
8770 }
8771#ifdef VBOX_STRICT
8772 else
8773 memset(pbBuf, 0xcc, cbMem);
8774#endif
8775#ifdef VBOX_STRICT
8776 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8777 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8778#endif
8779
8780 /*
8781 * Commit the bounce buffer entry.
8782 */
8783 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8784 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8785 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8786 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8787 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8788 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8789 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8790 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8791 pVCpu->iem.s.cActiveMappings++;
8792
8793 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8794 *ppvMem = pbBuf;
8795 return VINF_SUCCESS;
8796}
8797
8798
8799
8800/**
8801 * Maps the specified guest memory for the given kind of access.
8802 *
8803 * This may be using bounce buffering of the memory if it's crossing a page
8804 * boundary or if there is an access handler installed for any of it. Because
8805 * of lock prefix guarantees, we're in for some extra clutter when this
8806 * happens.
8807 *
8808 * This may raise a \#GP, \#SS, \#PF or \#AC.
8809 *
8810 * @returns VBox strict status code.
8811 *
8812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8813 * @param ppvMem Where to return the pointer to the mapped
8814 * memory.
8815 * @param cbMem The number of bytes to map. This is usually 1,
8816 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8817 * string operations it can be up to a page.
8818 * @param iSegReg The index of the segment register to use for
8819 * this access. The base and limits are checked.
8820 * Use UINT8_MAX to indicate that no segmentation
8821 * is required (for IDT, GDT and LDT accesses).
8822 * @param GCPtrMem The address of the guest memory.
8823 * @param fAccess How the memory is being accessed. The
8824 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8825 * how to map the memory, while the
8826 * IEM_ACCESS_WHAT_XXX bit is used when raising
8827 * exceptions.
8828 */
8829IEM_STATIC VBOXSTRICTRC
8830iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8831{
8832 /*
8833 * Check the input and figure out which mapping entry to use.
8834 */
8835 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8836 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8837 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8838
8839 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8840 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8841 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8842 {
8843 iMemMap = iemMemMapFindFree(pVCpu);
8844 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8845 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8846 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8847 pVCpu->iem.s.aMemMappings[2].fAccess),
8848 VERR_IEM_IPE_9);
8849 }
8850
8851 /*
8852 * Map the memory, checking that we can actually access it. If something
8853 * slightly complicated happens, fall back on bounce buffering.
8854 */
8855 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8856 if (rcStrict != VINF_SUCCESS)
8857 return rcStrict;
8858
8859 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8860 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8861
8862 RTGCPHYS GCPhysFirst;
8863 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8864 if (rcStrict != VINF_SUCCESS)
8865 return rcStrict;
8866
8867 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8868 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8869 if (fAccess & IEM_ACCESS_TYPE_READ)
8870 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8871
8872 void *pvMem;
8873 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8874 if (rcStrict != VINF_SUCCESS)
8875 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8876
8877 /*
8878 * Fill in the mapping table entry.
8879 */
8880 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8881 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8882 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8883 pVCpu->iem.s.cActiveMappings++;
8884
8885 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8886 *ppvMem = pvMem;
8887
8888 return VINF_SUCCESS;
8889}
8890
8891
8892/**
8893 * Commits the guest memory if bounce buffered and unmaps it.
8894 *
8895 * @returns Strict VBox status code.
8896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8897 * @param pvMem The mapping.
8898 * @param fAccess The kind of access.
8899 */
8900IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8901{
8902 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8903 AssertReturn(iMemMap >= 0, iMemMap);
8904
8905 /* If it's bounce buffered, we may need to write back the buffer. */
8906 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8907 {
8908 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8909 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8910 }
8911 /* Otherwise unlock it. */
8912 else
8913 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8914
8915 /* Free the entry. */
8916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8917 Assert(pVCpu->iem.s.cActiveMappings != 0);
8918 pVCpu->iem.s.cActiveMappings--;
8919 return VINF_SUCCESS;
8920}
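
/*
 * Sketch of the typical map/modify/commit pattern built on iemMemMap and
 * iemMemCommitAndUnmap (compare the simpler fetch helpers further down); the
 * operand, segment and the combined read+write flag IEM_ACCESS_DATA_RW are
 * illustrative assumptions:
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst |= 1;                     // modify the mapped guest word in place
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
 *     }
 *     return rcStrict;
 */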
8921
8922#ifdef IEM_WITH_SETJMP
8923
8924/**
8925 * Maps the specified guest memory for the given kind of access, longjmp on
8926 * error.
8927 *
8928 * This may be using bounce buffering of the memory if it's crossing a page
8929 * boundary or if there is an access handler installed for any of it. Because
8930 * of lock prefix guarantees, we're in for some extra clutter when this
8931 * happens.
8932 *
8933 * This may raise a \#GP, \#SS, \#PF or \#AC.
8934 *
8935 * @returns Pointer to the mapped memory.
8936 *
8937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8938 * @param cbMem The number of bytes to map. This is usually 1,
8939 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8940 * string operations it can be up to a page.
8941 * @param iSegReg The index of the segment register to use for
8942 * this access. The base and limits are checked.
8943 * Use UINT8_MAX to indicate that no segmentation
8944 * is required (for IDT, GDT and LDT accesses).
8945 * @param GCPtrMem The address of the guest memory.
8946 * @param fAccess How the memory is being accessed. The
8947 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8948 * how to map the memory, while the
8949 * IEM_ACCESS_WHAT_XXX bit is used when raising
8950 * exceptions.
8951 */
8952IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8953{
8954 /*
8955 * Check the input and figure out which mapping entry to use.
8956 */
8957 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8958 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8959 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8960
8961 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8962 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8963 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8964 {
8965 iMemMap = iemMemMapFindFree(pVCpu);
8966 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8967 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8968 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8969 pVCpu->iem.s.aMemMappings[2].fAccess),
8970 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8971 }
8972
8973 /*
8974 * Map the memory, checking that we can actually access it. If something
8975 * slightly complicated happens, fall back on bounce buffering.
8976 */
8977 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8978 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8979 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8980
8981 /* Crossing a page boundary? */
8982 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8983 { /* No (likely). */ }
8984 else
8985 {
8986 void *pvMem;
8987 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8988 if (rcStrict == VINF_SUCCESS)
8989 return pvMem;
8990 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8991 }
8992
8993 RTGCPHYS GCPhysFirst;
8994 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8995 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8996 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8997
8998 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8999 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9000 if (fAccess & IEM_ACCESS_TYPE_READ)
9001 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9002
9003 void *pvMem;
9004 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9005 if (rcStrict == VINF_SUCCESS)
9006 { /* likely */ }
9007 else
9008 {
9009 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9010 if (rcStrict == VINF_SUCCESS)
9011 return pvMem;
9012 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9013 }
9014
9015 /*
9016 * Fill in the mapping table entry.
9017 */
9018 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9019 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9020 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9021 pVCpu->iem.s.cActiveMappings++;
9022
9023 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9024 return pvMem;
9025}
9026
9027
9028/**
9029 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9030 *
9031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9032 * @param pvMem The mapping.
9033 * @param fAccess The kind of access.
9034 */
9035IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9036{
9037 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9038 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9039
9040 /* If it's bounce buffered, we may need to write back the buffer. */
9041 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9042 {
9043 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9044 {
9045 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9046 if (rcStrict == VINF_SUCCESS)
9047 return;
9048 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9049 }
9050 }
9051 /* Otherwise unlock it. */
9052 else
9053 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9054
9055 /* Free the entry. */
9056 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9057 Assert(pVCpu->iem.s.cActiveMappings != 0);
9058 pVCpu->iem.s.cActiveMappings--;
9059}
9060
9061#endif /* IEM_WITH_SETJMP */
9062
9063#ifndef IN_RING3
9064/**
9065 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9066 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and such).
9067 *
9068 * Allows the instruction to be completed and retired, while the IEM user will
9069 * return to ring-3 immediately afterwards and do the postponed writes there.
9070 *
9071 * @returns VBox status code (no strict statuses). Caller must check
9072 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param pvMem The mapping.
9075 * @param fAccess The kind of access.
9076 */
9077IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9078{
9079 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9080 AssertReturn(iMemMap >= 0, iMemMap);
9081
9082 /* If it's bounce buffered, we may need to write back the buffer. */
9083 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9084 {
9085 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9086 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9087 }
9088 /* Otherwise unlock it. */
9089 else
9090 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9091
9092 /* Free the entry. */
9093 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9094 Assert(pVCpu->iem.s.cActiveMappings != 0);
9095 pVCpu->iem.s.cActiveMappings--;
9096 return VINF_SUCCESS;
9097}
9098#endif
9099
9100
9101/**
9102 * Rolls back mappings, releasing page locks and such.
9103 *
9104 * The caller shall only call this after checking cActiveMappings.
9105 *
9107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9108 */
9109IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9110{
9111 Assert(pVCpu->iem.s.cActiveMappings > 0);
9112
9113 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9114 while (iMemMap-- > 0)
9115 {
9116 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9117 if (fAccess != IEM_ACCESS_INVALID)
9118 {
9119 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9120 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9121 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9122 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9123 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9124 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9125 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9126 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9127 pVCpu->iem.s.cActiveMappings--;
9128 }
9129 }
9130}
9131
9132
9133/**
9134 * Fetches a data byte.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu8Dst Where to return the byte.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint8_t const *pu8Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu8Dst = *pu8Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Fetches a data byte, longjmp on error.
9160 *
9161 * @returns The byte.
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9171 uint8_t const bRet = *pu8Src;
9172 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9173 return bRet;
9174}
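
/*
 * With the setjmp variants error handling is implicit, so a caller sketch
 * collapses to straight-line code (segment and address are illustrative):
 *
 *     uint8_t const bValue = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 *     // No status to check: failures longjmp back to the setjmp frame set up by the caller.
 */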
9175#endif /* IEM_WITH_SETJMP */
9176
9177
9178/**
9179 * Fetches a data word.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu16Dst Where to return the word.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint16_t const *pu16Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu16Dst = *pu16Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203/**
9204 * Fetches a data word, longjmp on error.
9205 *
9206 * @returns The word.
9207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9208 * @param iSegReg The index of the segment register to use for
9209 * this access. The base and limits are checked.
9210 * @param GCPtrMem The address of the guest memory.
9211 */
9212DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9213{
9214 /* The lazy approach for now... */
9215 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9216 uint16_t const u16Ret = *pu16Src;
9217 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9218 return u16Ret;
9219}
9220#endif
9221
9222
9223/**
9224 * Fetches a data dword.
9225 *
9226 * @returns Strict VBox status code.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param pu32Dst Where to return the dword.
9229 * @param iSegReg The index of the segment register to use for
9230 * this access. The base and limits are checked.
9231 * @param GCPtrMem The address of the guest memory.
9232 */
9233IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9234{
9235 /* The lazy approach for now... */
9236 uint32_t const *pu32Src;
9237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9238 if (rc == VINF_SUCCESS)
9239 {
9240 *pu32Dst = *pu32Src;
9241 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9242 }
9243 return rc;
9244}
9245
9246
9247#ifdef IEM_WITH_SETJMP
9248
9249IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9250{
9251 Assert(cbMem >= 1);
9252 Assert(iSegReg < X86_SREG_COUNT);
9253
9254 /*
9255 * 64-bit mode is simpler.
9256 */
9257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9258 {
9259 if (iSegReg >= X86_SREG_FS)
9260 {
9261 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9262 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9263 GCPtrMem += pSel->u64Base;
9264 }
9265
9266 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9267 return GCPtrMem;
9268 }
9269 /*
9270 * 16-bit and 32-bit segmentation.
9271 */
9272 else
9273 {
9274 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9275 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9276 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9277 == X86DESCATTR_P /* data, expand up */
9278 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9279 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9280 {
9281 /* expand up */
9282 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9283 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9284 && GCPtrLast32 > (uint32_t)GCPtrMem))
9285 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9286 }
9287 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9288 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9289 {
9290 /* expand down */
9291 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9292 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9293 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9294 && GCPtrLast32 > (uint32_t)GCPtrMem))
9295 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9296 }
9297 else
9298 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9299 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9300 }
9301 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9302}
9303
9304
9305IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9306{
9307 Assert(cbMem >= 1);
9308 Assert(iSegReg < X86_SREG_COUNT);
9309
9310 /*
9311 * 64-bit mode is simpler.
9312 */
9313 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9314 {
9315 if (iSegReg >= X86_SREG_FS)
9316 {
9317 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9318 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9319 GCPtrMem += pSel->u64Base;
9320 }
9321
9322 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9323 return GCPtrMem;
9324 }
9325 /*
9326 * 16-bit and 32-bit segmentation.
9327 */
9328 else
9329 {
9330 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9331 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9332 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9333 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9334 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9335 {
9336 /* expand up */
9337 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9338 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9339 && GCPtrLast32 > (uint32_t)GCPtrMem))
9340 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9341 }
9342 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9343 {
9344 /* expand down */
9345 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9346 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9347 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9348 && GCPtrLast32 > (uint32_t)GCPtrMem))
9349 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9350 }
9351 else
9352 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9353 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9354 }
9355 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9356}
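/*
 * Background on the limit checks above (architectural x86 behaviour, for
 * reference): an expand-up data segment allows byte offsets 0 .. u32Limit,
 * the limit being inclusive, so a 4-byte access starting at u32Limit - 3 is
 * the last one that can succeed. An expand-down data segment inverts this:
 * valid offsets are u32Limit + 1 .. 0xffff (or .. 0xffffffff when
 * Attr.n.u1DefBig is set), i.e. the limit marks the bottom of the usable
 * range, which is why the two branches check in opposite directions.
 */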
9357
9358
9359/**
9360 * Fetches a data dword, longjmp on error, fallback/safe version.
9361 *
9362 * @returns The dword.
9363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9364 * @param iSegReg The index of the segment register to use for
9365 * this access. The base and limits are checked.
9366 * @param GCPtrMem The address of the guest memory.
9367 */
9368IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9369{
9370 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9371 uint32_t const u32Ret = *pu32Src;
9372 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9373 return u32Ret;
9374}
9375
9376
9377/**
9378 * Fetches a data dword, longjmp on error.
9379 *
9380 * @returns The dword.
9381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9382 * @param iSegReg The index of the segment register to use for
9383 * this access. The base and limits are checked.
9384 * @param GCPtrMem The address of the guest memory.
9385 */
9386DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9387{
9388# ifdef IEM_WITH_DATA_TLB
9389 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9390 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9391 {
9392 /// @todo more later.
9393 }
9394
9395 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9396# else
9397 /* The lazy approach. */
9398 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9399 uint32_t const u32Ret = *pu32Src;
9400 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9401 return u32Ret;
9402# endif
9403}
9404#endif
9405
9406
9407#ifdef SOME_UNUSED_FUNCTION
9408/**
9409 * Fetches a data dword and sign extends it to a qword.
9410 *
9411 * @returns Strict VBox status code.
9412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9413 * @param pu64Dst Where to return the sign extended value.
9414 * @param iSegReg The index of the segment register to use for
9415 * this access. The base and limits are checked.
9416 * @param GCPtrMem The address of the guest memory.
9417 */
9418IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9419{
9420 /* The lazy approach for now... */
9421 int32_t const *pi32Src;
9422 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9423 if (rc == VINF_SUCCESS)
9424 {
9425 *pu64Dst = *pi32Src;
9426 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9427 }
9428#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9429 else
9430 *pu64Dst = 0;
9431#endif
9432 return rc;
9433}
9434#endif
9435
9436
9437/**
9438 * Fetches a data qword.
9439 *
9440 * @returns Strict VBox status code.
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param pu64Dst Where to return the qword.
9443 * @param iSegReg The index of the segment register to use for
9444 * this access. The base and limits are checked.
9445 * @param GCPtrMem The address of the guest memory.
9446 */
9447IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9448{
9449 /* The lazy approach for now... */
9450 uint64_t const *pu64Src;
9451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9452 if (rc == VINF_SUCCESS)
9453 {
9454 *pu64Dst = *pu64Src;
9455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9456 }
9457 return rc;
9458}
9459
9460
9461#ifdef IEM_WITH_SETJMP
9462/**
9463 * Fetches a data qword, longjmp on error.
9464 *
9465 * @returns The qword.
9466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9467 * @param iSegReg The index of the segment register to use for
9468 * this access. The base and limits are checked.
9469 * @param GCPtrMem The address of the guest memory.
9470 */
9471DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9472{
9473 /* The lazy approach for now... */
9474 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9475 uint64_t const u64Ret = *pu64Src;
9476 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9477 return u64Ret;
9478}
9479#endif
9480
9481
9482/**
9483 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9484 *
9485 * @returns Strict VBox status code.
9486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9487 * @param pu64Dst Where to return the qword.
9488 * @param iSegReg The index of the segment register to use for
9489 * this access. The base and limits are checked.
9490 * @param GCPtrMem The address of the guest memory.
9491 */
9492IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9493{
9494 /* The lazy approach for now... */
9495 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9496 if (RT_UNLIKELY(GCPtrMem & 15))
9497 return iemRaiseGeneralProtectionFault0(pVCpu);
9498
9499 uint64_t const *pu64Src;
9500 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9501 if (rc == VINF_SUCCESS)
9502 {
9503 *pu64Dst = *pu64Src;
9504 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9505 }
9506 return rc;
9507}
9508
9509
9510#ifdef IEM_WITH_SETJMP
9511/**
9512 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9513 *
9514 * @returns The qword.
9515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9516 * @param iSegReg The index of the segment register to use for
9517 * this access. The base and limits are checked.
9518 * @param GCPtrMem The address of the guest memory.
9519 */
9520DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9521{
9522 /* The lazy approach for now... */
9523 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9524 if (RT_LIKELY(!(GCPtrMem & 15)))
9525 {
9526 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9527 uint64_t const u64Ret = *pu64Src;
9528 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9529 return u64Ret;
9530 }
9531
9532 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9533 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9534}
9535#endif
9536
9537
9538/**
9539 * Fetches a data tword.
9540 *
9541 * @returns Strict VBox status code.
9542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9543 * @param pr80Dst Where to return the tword.
9544 * @param iSegReg The index of the segment register to use for
9545 * this access. The base and limits are checked.
9546 * @param GCPtrMem The address of the guest memory.
9547 */
9548IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9549{
9550 /* The lazy approach for now... */
9551 PCRTFLOAT80U pr80Src;
9552 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9553 if (rc == VINF_SUCCESS)
9554 {
9555 *pr80Dst = *pr80Src;
9556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9557 }
9558 return rc;
9559}
9560
9561
9562#ifdef IEM_WITH_SETJMP
9563/**
9564 * Fetches a data tword, longjmp on error.
9565 *
9566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9567 * @param pr80Dst Where to return the tword.
9568 * @param iSegReg The index of the segment register to use for
9569 * this access. The base and limits are checked.
9570 * @param GCPtrMem The address of the guest memory.
9571 */
9572DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9573{
9574 /* The lazy approach for now... */
9575 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9576 *pr80Dst = *pr80Src;
9577 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9578}
9579#endif
9580
9581
9582/**
9583 * Fetches a data dqword (double qword), generally SSE related.
9584 *
9585 * @returns Strict VBox status code.
9586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9587 * @param pu128Dst Where to return the dqword.
9588 * @param iSegReg The index of the segment register to use for
9589 * this access. The base and limits are checked.
9590 * @param GCPtrMem The address of the guest memory.
9591 */
9592IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9593{
9594 /* The lazy approach for now... */
9595 PCRTUINT128U pu128Src;
9596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9597 if (rc == VINF_SUCCESS)
9598 {
9599 pu128Dst->au64[0] = pu128Src->au64[0];
9600 pu128Dst->au64[1] = pu128Src->au64[1];
9601 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9602 }
9603 return rc;
9604}
9605
9606
9607#ifdef IEM_WITH_SETJMP
9608/**
9609 * Fetches a data dqword (double qword), generally SSE related.
9610 *
9611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9612 * @param pu128Dst Where to return the dqword.
9613 * @param iSegReg The index of the segment register to use for
9614 * this access. The base and limits are checked.
9615 * @param GCPtrMem The address of the guest memory.
9616 */
9617IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9618{
9619 /* The lazy approach for now... */
9620 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9621 pu128Dst->au64[0] = pu128Src->au64[0];
9622 pu128Dst->au64[1] = pu128Src->au64[1];
9623 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9624}
9625#endif
9626
9627
9628/**
9629 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9630 * related.
9631 *
9632 * Raises \#GP(0) if not aligned.
9633 *
9634 * @returns Strict VBox status code.
9635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9636 * @param pu128Dst Where to return the dqword.
9637 * @param iSegReg The index of the segment register to use for
9638 * this access. The base and limits are checked.
9639 * @param GCPtrMem The address of the guest memory.
9640 */
9641IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9642{
9643 /* The lazy approach for now... */
9644 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9645 if ( (GCPtrMem & 15)
9646 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9647 return iemRaiseGeneralProtectionFault0(pVCpu);
9648
9649 PCRTUINT128U pu128Src;
9650 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9651 if (rc == VINF_SUCCESS)
9652 {
9653 pu128Dst->au64[0] = pu128Src->au64[0];
9654 pu128Dst->au64[1] = pu128Src->au64[1];
9655 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9656 }
9657 return rc;
9658}
9659
9660
9661#ifdef IEM_WITH_SETJMP
9662/**
9663 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9664 * related, longjmp on error.
9665 *
9666 * Raises \#GP(0) if not aligned.
9667 *
9668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9669 * @param pu128Dst Where to return the dqword.
9670 * @param iSegReg The index of the segment register to use for
9671 * this access. The base and limits are checked.
9672 * @param GCPtrMem The address of the guest memory.
9673 */
9674DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9675{
9676 /* The lazy approach for now... */
9677 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9678 if ( (GCPtrMem & 15) == 0
9679 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9680 {
9681 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9682 pu128Dst->au64[0] = pu128Src->au64[0];
9683 pu128Dst->au64[1] = pu128Src->au64[1];
9684 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9685 return;
9686 }
9687
9688 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9689 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9690}
9691#endif
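/*
 * Usage sketch for the SSE-aligned 128-bit fetch (illustrative only; a
 * MOVAPS-like caller is assumed, with iEffSeg/GCPtrEffSrc coming from the
 * decoder). Misaligned addresses yield #GP(0) unless MXCSR.MM is set, as
 * implemented above:
 *
 *      RTUINT128U   uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, iEffSeg, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */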
9692
9693
9694/**
9695 * Fetches a data oword (octo word), generally AVX related.
9696 *
9697 * @returns Strict VBox status code.
9698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9699 * @param pu256Dst Where to return the oword.
9700 * @param iSegReg The index of the segment register to use for
9701 * this access. The base and limits are checked.
9702 * @param GCPtrMem The address of the guest memory.
9703 */
9704IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9705{
9706 /* The lazy approach for now... */
9707 PCRTUINT256U pu256Src;
9708 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9709 if (rc == VINF_SUCCESS)
9710 {
9711 pu256Dst->au64[0] = pu256Src->au64[0];
9712 pu256Dst->au64[1] = pu256Src->au64[1];
9713 pu256Dst->au64[2] = pu256Src->au64[2];
9714 pu256Dst->au64[3] = pu256Src->au64[3];
9715 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9716 }
9717 return rc;
9718}
9719
9720
9721#ifdef IEM_WITH_SETJMP
9722/**
9723 * Fetches a data oword (octo word), generally AVX related.
9724 *
9725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9726 * @param pu256Dst Where to return the oword.
9727 * @param iSegReg The index of the segment register to use for
9728 * this access. The base and limits are checked.
9729 * @param GCPtrMem The address of the guest memory.
9730 */
9731IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9732{
9733 /* The lazy approach for now... */
9734 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9735 pu256Dst->au64[0] = pu256Src->au64[0];
9736 pu256Dst->au64[1] = pu256Src->au64[1];
9737 pu256Dst->au64[2] = pu256Src->au64[2];
9738 pu256Dst->au64[3] = pu256Src->au64[3];
9739 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9740}
9741#endif
9742
9743
9744/**
9745 * Fetches a data oword (octo word) at an aligned address, generally AVX
9746 * related.
9747 *
9748 * Raises \#GP(0) if not aligned.
9749 *
9750 * @returns Strict VBox status code.
9751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9752 * @param pu256Dst Where to return the oword.
9753 * @param iSegReg The index of the segment register to use for
9754 * this access. The base and limits are checked.
9755 * @param GCPtrMem The address of the guest memory.
9756 */
9757IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9758{
9759 /* The lazy approach for now... */
9760 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9761 if (GCPtrMem & 31)
9762 return iemRaiseGeneralProtectionFault0(pVCpu);
9763
9764 PCRTUINT256U pu256Src;
9765 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9766 if (rc == VINF_SUCCESS)
9767 {
9768 pu256Dst->au64[0] = pu256Src->au64[0];
9769 pu256Dst->au64[1] = pu256Src->au64[1];
9770 pu256Dst->au64[2] = pu256Src->au64[2];
9771 pu256Dst->au64[3] = pu256Src->au64[3];
9772 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9773 }
9774 return rc;
9775}
9776
9777
9778#ifdef IEM_WITH_SETJMP
9779/**
9780 * Fetches a data oword (octo word) at an aligned address, generally AVX
9781 * related, longjmp on error.
9782 *
9783 * Raises \#GP(0) if not aligned.
9784 *
9785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9786 * @param pu256Dst Where to return the oword.
9787 * @param iSegReg The index of the segment register to use for
9788 * this access. The base and limits are checked.
9789 * @param GCPtrMem The address of the guest memory.
9790 */
9791DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9792{
9793 /* The lazy approach for now... */
9794 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9795 if ((GCPtrMem & 31) == 0)
9796 {
9797 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9798 pu256Dst->au64[0] = pu256Src->au64[0];
9799 pu256Dst->au64[1] = pu256Src->au64[1];
9800 pu256Dst->au64[2] = pu256Src->au64[2];
9801 pu256Dst->au64[3] = pu256Src->au64[3];
9802 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9803 return;
9804 }
9805
9806 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9807 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9808}
9809#endif
9810
9811
9812
9813/**
9814 * Fetches a descriptor register (lgdt, lidt).
9815 *
9816 * @returns Strict VBox status code.
9817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9818 * @param pcbLimit Where to return the limit.
9819 * @param pGCPtrBase Where to return the base.
9820 * @param iSegReg The index of the segment register to use for
9821 * this access. The base and limits are checked.
9822 * @param GCPtrMem The address of the guest memory.
9823 * @param enmOpSize The effective operand size.
9824 */
9825IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9826 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9827{
9828 /*
9829 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9830 * little special:
9831 * - The two reads are done separately.
9832 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9833 * - We suspect the 386 to actually commit the limit before the base in
9834 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9835 * don't try to emulate this eccentric behavior, because it's not well
9836 * enough understood and rather hard to trigger.
9837 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9838 */
9839 VBOXSTRICTRC rcStrict;
9840 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9841 {
9842 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9843 if (rcStrict == VINF_SUCCESS)
9844 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9845 }
9846 else
9847 {
9848 uint32_t uTmp = 0; /* (avoids a Visual C++ potentially-used-uninitialized warning) */
9849 if (enmOpSize == IEMMODE_32BIT)
9850 {
9851 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9852 {
9853 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9854 if (rcStrict == VINF_SUCCESS)
9855 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9856 }
9857 else
9858 {
9859 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9860 if (rcStrict == VINF_SUCCESS)
9861 {
9862 *pcbLimit = (uint16_t)uTmp;
9863 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9864 }
9865 }
9866 if (rcStrict == VINF_SUCCESS)
9867 *pGCPtrBase = uTmp;
9868 }
9869 else
9870 {
9871 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9872 if (rcStrict == VINF_SUCCESS)
9873 {
9874 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9875 if (rcStrict == VINF_SUCCESS)
9876 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9877 }
9878 }
9879 }
9880 return rcStrict;
9881}
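/*
 * Sketch of an LGDT/LIDT-style caller for the helper above (illustrative
 * only; iEffSeg, GCPtrEffSrc and enmEffOpSize are placeholders for decoder
 * output). The limit and base come back separately; how many base bits are
 * significant depends on the mode and operand size, as handled above.
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */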
9882
9883
9884
9885/**
9886 * Stores a data byte.
9887 *
9888 * @returns Strict VBox status code.
9889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9890 * @param iSegReg The index of the segment register to use for
9891 * this access. The base and limits are checked.
9892 * @param GCPtrMem The address of the guest memory.
9893 * @param u8Value The value to store.
9894 */
9895IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9896{
9897 /* The lazy approach for now... */
9898 uint8_t *pu8Dst;
9899 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9900 if (rc == VINF_SUCCESS)
9901 {
9902 *pu8Dst = u8Value;
9903 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9904 }
9905 return rc;
9906}
9907
9908
9909#ifdef IEM_WITH_SETJMP
9910/**
9911 * Stores a data byte, longjmp on error.
9912 *
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param iSegReg The index of the segment register to use for
9915 * this access. The base and limits are checked.
9916 * @param GCPtrMem The address of the guest memory.
9917 * @param u8Value The value to store.
9918 */
9919IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9920{
9921 /* The lazy approach for now... */
9922 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9923 *pu8Dst = u8Value;
9924 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9925}
9926#endif
9927
9928
9929/**
9930 * Stores a data word.
9931 *
9932 * @returns Strict VBox status code.
9933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9934 * @param iSegReg The index of the segment register to use for
9935 * this access. The base and limits are checked.
9936 * @param GCPtrMem The address of the guest memory.
9937 * @param u16Value The value to store.
9938 */
9939IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9940{
9941 /* The lazy approach for now... */
9942 uint16_t *pu16Dst;
9943 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9944 if (rc == VINF_SUCCESS)
9945 {
9946 *pu16Dst = u16Value;
9947 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9948 }
9949 return rc;
9950}
9951
9952
9953#ifdef IEM_WITH_SETJMP
9954/**
9955 * Stores a data word, longjmp on error.
9956 *
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param iSegReg The index of the segment register to use for
9959 * this access. The base and limits are checked.
9960 * @param GCPtrMem The address of the guest memory.
9961 * @param u16Value The value to store.
9962 */
9963IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9964{
9965 /* The lazy approach for now... */
9966 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 *pu16Dst = u16Value;
9968 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9969}
9970#endif
9971
9972
9973/**
9974 * Stores a data dword.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param iSegReg The index of the segment register to use for
9979 * this access. The base and limits are checked.
9980 * @param GCPtrMem The address of the guest memory.
9981 * @param u32Value The value to store.
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9984{
9985 /* The lazy approach for now... */
9986 uint32_t *pu32Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 *pu32Dst = u32Value;
9991 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9992 }
9993 return rc;
9994}
9995
9996
9997#ifdef IEM_WITH_SETJMP
9998/**
9999 * Stores a data dword, longjmp on error.
10000 *
10002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10003 * @param iSegReg The index of the segment register to use for
10004 * this access. The base and limits are checked.
10005 * @param GCPtrMem The address of the guest memory.
10006 * @param u32Value The value to store.
10007 */
10008IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10009{
10010 /* The lazy approach for now... */
10011 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10012 *pu32Dst = u32Value;
10013 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10014}
10015#endif
10016
10017
10018/**
10019 * Stores a data qword.
10020 *
10021 * @returns Strict VBox status code.
10022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10023 * @param iSegReg The index of the segment register to use for
10024 * this access. The base and limits are checked.
10025 * @param GCPtrMem The address of the guest memory.
10026 * @param u64Value The value to store.
10027 */
10028IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10029{
10030 /* The lazy approach for now... */
10031 uint64_t *pu64Dst;
10032 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10033 if (rc == VINF_SUCCESS)
10034 {
10035 *pu64Dst = u64Value;
10036 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10037 }
10038 return rc;
10039}
10040
10041
10042#ifdef IEM_WITH_SETJMP
10043/**
10044 * Stores a data qword, longjmp on error.
10045 *
10046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10047 * @param iSegReg The index of the segment register to use for
10048 * this access. The base and limits are checked.
10049 * @param GCPtrMem The address of the guest memory.
10050 * @param u64Value The value to store.
10051 */
10052IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10053{
10054 /* The lazy approach for now... */
10055 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10056 *pu64Dst = u64Value;
10057 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10058}
10059#endif
10060
10061
10062/**
10063 * Stores a data dqword.
10064 *
10065 * @returns Strict VBox status code.
10066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10067 * @param iSegReg The index of the segment register to use for
10068 * this access. The base and limits are checked.
10069 * @param GCPtrMem The address of the guest memory.
10070 * @param u128Value The value to store.
10071 */
10072IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10073{
10074 /* The lazy approach for now... */
10075 PRTUINT128U pu128Dst;
10076 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10077 if (rc == VINF_SUCCESS)
10078 {
10079 pu128Dst->au64[0] = u128Value.au64[0];
10080 pu128Dst->au64[1] = u128Value.au64[1];
10081 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10082 }
10083 return rc;
10084}
10085
10086
10087#ifdef IEM_WITH_SETJMP
10088/**
10089 * Stores a data dqword, longjmp on error.
10090 *
10091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10092 * @param iSegReg The index of the segment register to use for
10093 * this access. The base and limits are checked.
10094 * @param GCPtrMem The address of the guest memory.
10095 * @param u128Value The value to store.
10096 */
10097IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10098{
10099 /* The lazy approach for now... */
10100 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10101 pu128Dst->au64[0] = u128Value.au64[0];
10102 pu128Dst->au64[1] = u128Value.au64[1];
10103 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10104}
10105#endif
10106
10107
10108/**
10109 * Stores a data dqword, SSE aligned.
10110 *
10111 * @returns Strict VBox status code.
10112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10113 * @param iSegReg The index of the segment register to use for
10114 * this access. The base and limits are checked.
10115 * @param GCPtrMem The address of the guest memory.
10116 * @param u128Value The value to store.
10117 */
10118IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10119{
10120 /* The lazy approach for now... */
10121 if ( (GCPtrMem & 15)
10122 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10123 return iemRaiseGeneralProtectionFault0(pVCpu);
10124
10125 PRTUINT128U pu128Dst;
10126 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10127 if (rc == VINF_SUCCESS)
10128 {
10129 pu128Dst->au64[0] = u128Value.au64[0];
10130 pu128Dst->au64[1] = u128Value.au64[1];
10131 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10132 }
10133 return rc;
10134}
10135
10136
10137#ifdef IEM_WITH_SETJMP
10138/**
10139 * Stores a data dqword, SSE aligned, longjmp on error.
10140 *
10142 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10143 * @param iSegReg The index of the segment register to use for
10144 * this access. The base and limits are checked.
10145 * @param GCPtrMem The address of the guest memory.
10146 * @param u128Value The value to store.
10147 */
10148DECL_NO_INLINE(IEM_STATIC, void)
10149iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10150{
10151 /* The lazy approach for now... */
10152 if ( (GCPtrMem & 15) == 0
10153 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10154 {
10155 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10156 pu128Dst->au64[0] = u128Value.au64[0];
10157 pu128Dst->au64[1] = u128Value.au64[1];
10158 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10159 return;
10160 }
10161
10162 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10163 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10164}
10165#endif
10166
10167
10168/**
10169 * Stores a data oword (octo word).
10170 *
10171 * @returns Strict VBox status code.
10172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10173 * @param iSegReg The index of the segment register to use for
10174 * this access. The base and limits are checked.
10175 * @param GCPtrMem The address of the guest memory.
10176 * @param pu256Value Pointer to the value to store.
10177 */
10178IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10179{
10180 /* The lazy approach for now... */
10181 PRTUINT256U pu256Dst;
10182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10183 if (rc == VINF_SUCCESS)
10184 {
10185 pu256Dst->au64[0] = pu256Value->au64[0];
10186 pu256Dst->au64[1] = pu256Value->au64[1];
10187 pu256Dst->au64[2] = pu256Value->au64[2];
10188 pu256Dst->au64[3] = pu256Value->au64[3];
10189 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10190 }
10191 return rc;
10192}
10193
10194
10195#ifdef IEM_WITH_SETJMP
10196/**
10197 * Stores a data oword (octo word), longjmp on error.
10198 *
10199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10200 * @param iSegReg The index of the segment register to use for
10201 * this access. The base and limits are checked.
10202 * @param GCPtrMem The address of the guest memory.
10203 * @param pu256Value Pointer to the value to store.
10204 */
10205IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10206{
10207 /* The lazy approach for now... */
10208 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10209 pu256Dst->au64[0] = pu256Value->au64[0];
10210 pu256Dst->au64[1] = pu256Value->au64[1];
10211 pu256Dst->au64[2] = pu256Value->au64[2];
10212 pu256Dst->au64[3] = pu256Value->au64[3];
10213 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10214}
10215#endif
10216
10217
10218/**
10219 * Stores a data oword (octo word), AVX aligned.
10220 *
10221 * @returns Strict VBox status code.
10222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10223 * @param iSegReg The index of the segment register to use for
10224 * this access. The base and limits are checked.
10225 * @param GCPtrMem The address of the guest memory.
10226 * @param pu256Value Pointer to the value to store.
10227 */
10228IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10229{
10230 /* The lazy approach for now... */
10231 if (GCPtrMem & 31)
10232 return iemRaiseGeneralProtectionFault0(pVCpu);
10233
10234 PRTUINT256U pu256Dst;
10235 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10236 if (rc == VINF_SUCCESS)
10237 {
10238 pu256Dst->au64[0] = pu256Value->au64[0];
10239 pu256Dst->au64[1] = pu256Value->au64[1];
10240 pu256Dst->au64[2] = pu256Value->au64[2];
10241 pu256Dst->au64[3] = pu256Value->au64[3];
10242 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10243 }
10244 return rc;
10245}
10246
10247
10248#ifdef IEM_WITH_SETJMP
10249/**
10250 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10251 *
10253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10254 * @param iSegReg The index of the segment register to use for
10255 * this access. The base and limits are checked.
10256 * @param GCPtrMem The address of the guest memory.
10257 * @param pu256Value Pointer to the value to store.
10258 */
10259DECL_NO_INLINE(IEM_STATIC, void)
10260iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10261{
10262 /* The lazy approach for now... */
10263 if ((GCPtrMem & 31) == 0)
10264 {
10265 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10266 pu256Dst->au64[0] = pu256Value->au64[0];
10267 pu256Dst->au64[1] = pu256Value->au64[1];
10268 pu256Dst->au64[2] = pu256Value->au64[2];
10269 pu256Dst->au64[3] = pu256Value->au64[3];
10270 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10271 return;
10272 }
10273
10274 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10275 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10276}
10277#endif
10278
10279
10280/**
10281 * Stores a descriptor register (sgdt, sidt).
10282 *
10283 * @returns Strict VBox status code.
10284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10285 * @param cbLimit The limit.
10286 * @param GCPtrBase The base address.
10287 * @param iSegReg The index of the segment register to use for
10288 * this access. The base and limits are checked.
10289 * @param GCPtrMem The address of the guest memory.
10290 */
10291IEM_STATIC VBOXSTRICTRC
10292iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10293{
10294 /*
10295 * The SIDT and SGDT instructions actually store the data using two
10296 * independent writes. The instructions do not respond to opsize prefixes.
10297 */
10298 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10299 if (rcStrict == VINF_SUCCESS)
10300 {
10301 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10302 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10303 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10304 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10305 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10306 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10307 else
10308 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10309 }
10310 return rcStrict;
10311}
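/*
 * Resulting memory layout of the helper above: the 16-bit limit is written
 * at GCPtrMem and the base at GCPtrMem + 2. The base is stored as 32 bits in
 * 16-bit and 32-bit mode (with the top byte forced to 0xff for 286-class
 * targets in 16-bit mode) and as 64 bits in long mode.
 */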
10312
10313
10314/**
10315 * Pushes a word onto the stack.
10316 *
10317 * @returns Strict VBox status code.
10318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10319 * @param u16Value The value to push.
10320 */
10321IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10322{
10323 /* Decrement the stack pointer. */
10324 uint64_t uNewRsp;
10325 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10326
10327 /* Write the word the lazy way. */
10328 uint16_t *pu16Dst;
10329 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10330 if (rc == VINF_SUCCESS)
10331 {
10332 *pu16Dst = u16Value;
10333 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10334 }
10335
10336 /* Commit the new RSP value unless an access handler made trouble. */
10337 if (rc == VINF_SUCCESS)
10338 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10339
10340 return rc;
10341}
10342
10343
10344/**
10345 * Pushes a dword onto the stack.
10346 *
10347 * @returns Strict VBox status code.
10348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10349 * @param u32Value The value to push.
10350 */
10351IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10352{
10353 /* Decrement the stack pointer. */
10354 uint64_t uNewRsp;
10355 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10356
10357 /* Write the dword the lazy way. */
10358 uint32_t *pu32Dst;
10359 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10360 if (rc == VINF_SUCCESS)
10361 {
10362 *pu32Dst = u32Value;
10363 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10364 }
10365
10366 /* Commit the new RSP value unless an access handler made trouble. */
10367 if (rc == VINF_SUCCESS)
10368 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10369
10370 return rc;
10371}
10372
10373
10374/**
10375 * Pushes a dword segment register value onto the stack.
10376 *
10377 * @returns Strict VBox status code.
10378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10379 * @param u32Value The value to push.
10380 */
10381IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10382{
10383 /* Decrement the stack pointer. */
10384 uint64_t uNewRsp;
10385 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10386
10387 /* The Intel docs talk about zero extending the selector register
10388 value. My actual Intel CPU here might be zero extending the value,
10389 but it still only writes the lower word... */
10390 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10391 * happens when crossing an electric page boundary: is the high word checked
10392 * for write accessibility or not? Probably it is. What about segment limits?
10393 * It appears this behavior is also shared with trap error codes.
10394 *
10395 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10396 * ancient hardware when it actually did change. */
10397 uint16_t *pu16Dst;
10398 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10399 if (rc == VINF_SUCCESS)
10400 {
10401 *pu16Dst = (uint16_t)u32Value;
10402 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10403 }
10404
10405 /* Commit the new RSP value unless an access handler made trouble. */
10406 if (rc == VINF_SUCCESS)
10407 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10408
10409 return rc;
10410}
10411
10412
10413/**
10414 * Pushes a qword onto the stack.
10415 *
10416 * @returns Strict VBox status code.
10417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10418 * @param u64Value The value to push.
10419 */
10420IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10421{
10422 /* Decrement the stack pointer. */
10423 uint64_t uNewRsp;
10424 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10425
10426 /* Write the qword the lazy way. */
10427 uint64_t *pu64Dst;
10428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10429 if (rc == VINF_SUCCESS)
10430 {
10431 *pu64Dst = u64Value;
10432 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10433 }
10434
10435 /* Commit the new RSP value unless an access handler made trouble. */
10436 if (rc == VINF_SUCCESS)
10437 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10438
10439 return rc;
10440}
10441
10442
10443/**
10444 * Pops a word from the stack.
10445 *
10446 * @returns Strict VBox status code.
10447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10448 * @param pu16Value Where to store the popped value.
10449 */
10450IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10451{
10452 /* Increment the stack pointer. */
10453 uint64_t uNewRsp;
10454 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10455
10456 /* Read the word the lazy way. */
10457 uint16_t const *pu16Src;
10458 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10459 if (rc == VINF_SUCCESS)
10460 {
10461 *pu16Value = *pu16Src;
10462 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10463
10464 /* Commit the new RSP value. */
10465 if (rc == VINF_SUCCESS)
10466 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10467 }
10468
10469 return rc;
10470}
10471
10472
10473/**
10474 * Pops a dword from the stack.
10475 *
10476 * @returns Strict VBox status code.
10477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10478 * @param pu32Value Where to store the popped value.
10479 */
10480IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10481{
10482 /* Increment the stack pointer. */
10483 uint64_t uNewRsp;
10484 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10485
10486 /* Read the dword the lazy way. */
10487 uint32_t const *pu32Src;
10488 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10489 if (rc == VINF_SUCCESS)
10490 {
10491 *pu32Value = *pu32Src;
10492 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10493
10494 /* Commit the new RSP value. */
10495 if (rc == VINF_SUCCESS)
10496 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10497 }
10498
10499 return rc;
10500}
10501
10502
10503/**
10504 * Pops a qword from the stack.
10505 *
10506 * @returns Strict VBox status code.
10507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10508 * @param pu64Value Where to store the popped value.
10509 */
10510IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10511{
10512 /* Increment the stack pointer. */
10513 uint64_t uNewRsp;
10514 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10515
10516 /* Read the qword the lazy way. */
10517 uint64_t const *pu64Src;
10518 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10519 if (rc == VINF_SUCCESS)
10520 {
10521 *pu64Value = *pu64Src;
10522 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10523
10524 /* Commit the new RSP value. */
10525 if (rc == VINF_SUCCESS)
10526 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10527 }
10528
10529 return rc;
10530}
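/*
 * Push/pop usage sketch (illustrative only; u32Val is a placeholder). The
 * helpers only commit the new RSP when the memory access succeeded, so a
 * caller simply forwards the status code:
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, u32Val);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ...
 *      uint32_t u32Popped;
 *      rcStrict = iemMemStackPopU32(pVCpu, &u32Popped);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */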
10531
10532
10533/**
10534 * Pushes a word onto the stack, using a temporary stack pointer.
10535 *
10536 * @returns Strict VBox status code.
10537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10538 * @param u16Value The value to push.
10539 * @param pTmpRsp Pointer to the temporary stack pointer.
10540 */
10541IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10542{
10543 /* Decrement the stack pointer. */
10544 RTUINT64U NewRsp = *pTmpRsp;
10545 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10546
10547 /* Write the word the lazy way. */
10548 uint16_t *pu16Dst;
10549 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10550 if (rc == VINF_SUCCESS)
10551 {
10552 *pu16Dst = u16Value;
10553 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10554 }
10555
10556 /* Commit the new RSP value unless an access handler made trouble. */
10557 if (rc == VINF_SUCCESS)
10558 *pTmpRsp = NewRsp;
10559
10560 return rc;
10561}
10562
10563
10564/**
10565 * Pushes a dword onto the stack, using a temporary stack pointer.
10566 *
10567 * @returns Strict VBox status code.
10568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10569 * @param u32Value The value to push.
10570 * @param pTmpRsp Pointer to the temporary stack pointer.
10571 */
10572IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10573{
10574 /* Decrement the stack pointer. */
10575 RTUINT64U NewRsp = *pTmpRsp;
10576 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10577
10578 /* Write the dword the lazy way. */
10579 uint32_t *pu32Dst;
10580 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10581 if (rc == VINF_SUCCESS)
10582 {
10583 *pu32Dst = u32Value;
10584 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10585 }
10586
10587 /* Commit the new RSP value unless an access handler made trouble. */
10588 if (rc == VINF_SUCCESS)
10589 *pTmpRsp = NewRsp;
10590
10591 return rc;
10592}
10593
10594
10595/**
10596 * Pushes a qword onto the stack, using a temporary stack pointer.
10597 *
10598 * @returns Strict VBox status code.
10599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10600 * @param u64Value The value to push.
10601 * @param pTmpRsp Pointer to the temporary stack pointer.
10602 */
10603IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10604{
10605 /* Decrement the stack pointer. */
10606 RTUINT64U NewRsp = *pTmpRsp;
10607 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10608
10609 /* Write the qword the lazy way. */
10610 uint64_t *pu64Dst;
10611 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10612 if (rc == VINF_SUCCESS)
10613 {
10614 *pu64Dst = u64Value;
10615 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10616 }
10617
10618 /* Commit the new RSP value unless an access handler made trouble. */
10619 if (rc == VINF_SUCCESS)
10620 *pTmpRsp = NewRsp;
10621
10622 return rc;
10623}
10624
10625
10626/**
10627 * Pops a word from the stack, using a temporary stack pointer.
10628 *
10629 * @returns Strict VBox status code.
10630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10631 * @param pu16Value Where to store the popped value.
10632 * @param pTmpRsp Pointer to the temporary stack pointer.
10633 */
10634IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10635{
10636 /* Increment the stack pointer. */
10637 RTUINT64U NewRsp = *pTmpRsp;
10638 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10639
10640 /* Read the word the lazy way. */
10641 uint16_t const *pu16Src;
10642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10643 if (rc == VINF_SUCCESS)
10644 {
10645 *pu16Value = *pu16Src;
10646 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10647
10648 /* Commit the new RSP value. */
10649 if (rc == VINF_SUCCESS)
10650 *pTmpRsp = NewRsp;
10651 }
10652
10653 return rc;
10654}
10655
10656
10657/**
10658 * Pops a dword from the stack, using a temporary stack pointer.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10662 * @param pu32Value Where to store the popped value.
10663 * @param pTmpRsp Pointer to the temporary stack pointer.
10664 */
10665IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10666{
10667 /* Increment the stack pointer. */
10668 RTUINT64U NewRsp = *pTmpRsp;
10669 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10670
10671 /* Read the dword the lazy way. */
10672 uint32_t const *pu32Src;
10673 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10674 if (rc == VINF_SUCCESS)
10675 {
10676 *pu32Value = *pu32Src;
10677 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10678
10679 /* Commit the new RSP value. */
10680 if (rc == VINF_SUCCESS)
10681 *pTmpRsp = NewRsp;
10682 }
10683
10684 return rc;
10685}
10686
10687
10688/**
10689 * Pops a qword from the stack, using a temporary stack pointer.
10690 *
10691 * @returns Strict VBox status code.
10692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10693 * @param pu64Value Where to store the popped value.
10694 * @param pTmpRsp Pointer to the temporary stack pointer.
10695 */
10696IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10697{
10698 /* Increment the stack pointer. */
10699 RTUINT64U NewRsp = *pTmpRsp;
10700 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10701
10702 /* Read the qword the lazy way. */
10703 uint64_t const *pu64Src;
10704 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10705 if (rcStrict == VINF_SUCCESS)
10706 {
10707 *pu64Value = *pu64Src;
10708 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10709
10710 /* Commit the new RSP value. */
10711 if (rcStrict == VINF_SUCCESS)
10712 *pTmpRsp = NewRsp;
10713 }
10714
10715 return rcStrict;
10716}
10717
10718
10719/**
10720 * Begin a special stack push (used by interrupts, exceptions and such).
10721 *
10722 * This will raise \#SS or \#PF if appropriate.
10723 *
10724 * @returns Strict VBox status code.
10725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10726 * @param cbMem The number of bytes to push onto the stack.
10727 * @param ppvMem Where to return the pointer to the stack memory.
10728 * As with the other memory functions this could be
10729 * direct access or bounce buffered access, so
10730 * don't commit any register changes until the commit call
10731 * succeeds.
10732 * @param puNewRsp Where to return the new RSP value. This must be
10733 * passed unchanged to
10734 * iemMemStackPushCommitSpecial().
10735 */
10736IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10737{
10738 Assert(cbMem < UINT8_MAX);
10739 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10740 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10741}
10742
10743
10744/**
10745 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10746 *
10747 * This will update the rSP.
10748 *
10749 * @returns Strict VBox status code.
10750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10751 * @param pvMem The pointer returned by
10752 * iemMemStackPushBeginSpecial().
10753 * @param uNewRsp The new RSP value returned by
10754 * iemMemStackPushBeginSpecial().
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10757{
10758 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10759 if (rcStrict == VINF_SUCCESS)
10760 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10761 return rcStrict;
10762}
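
/*
 * Usage sketch (illustrative only, not part of the build): a caller such as
 * an exception dispatcher pairs the two functions above, writing into the
 * mapped frame and committing afterwards; uErrorCode here is just a
 * placeholder value.
 *
 *    uint32_t    *pu32Frame;
 *    uint64_t     uNewRsp;
 *    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4, (void **)&pu32Frame, &uNewRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *    {
 *        *pu32Frame = uErrorCode;
 *        rcStrict   = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp);
 *    }
 */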
10763
10764
10765/**
10766 * Begin a special stack pop (used by iret, retf and such).
10767 *
10768 * This will raise \#SS or \#PF if appropriate.
10769 *
10770 * @returns Strict VBox status code.
10771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10772 * @param cbMem The number of bytes to pop from the stack.
10773 * @param ppvMem Where to return the pointer to the stack memory.
10774 * @param puNewRsp Where to return the new RSP value. This must be
10775 * assigned to CPUMCTX::rsp manually some time
10776 * after iemMemStackPopDoneSpecial() has been
10777 * called.
10778 */
10779IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10780{
10781 Assert(cbMem < UINT8_MAX);
10782 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10783 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10784}
10785
10786
10787/**
10788 * Continue a special stack pop (used by iret and retf).
10789 *
10790 * This will raise \#SS or \#PF if appropriate.
10791 *
10792 * @returns Strict VBox status code.
10793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10794 * @param cbMem The number of bytes to pop from the stack.
10795 * @param ppvMem Where to return the pointer to the stack memory.
10796 * @param puNewRsp Where to return the new RSP value. This must be
10797 * assigned to CPUMCTX::rsp manually some time
10798 * after iemMemStackPopDoneSpecial() has been
10799 * called.
10800 */
10801IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10802{
10803 Assert(cbMem < UINT8_MAX);
10804 RTUINT64U NewRsp;
10805 NewRsp.u = *puNewRsp;
10806 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10807 *puNewRsp = NewRsp.u;
10808 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10809}
10810
10811
10812/**
10813 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10814 * iemMemStackPopContinueSpecial).
10815 *
10816 * The caller will manually commit the rSP.
10817 *
10818 * @returns Strict VBox status code.
10819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10820 * @param pvMem The pointer returned by
10821 * iemMemStackPopBeginSpecial() or
10822 * iemMemStackPopContinueSpecial().
10823 */
10824IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10825{
10826 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10827}
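
/*
 * Usage sketch (illustrative only, not part of the build): iret/retf style
 * code strings the special pop functions above together roughly like this,
 * and only assigns CPUMCTX::rsp once the popped frame has been validated:
 *
 *    void const  *pvFrame;
 *    uint64_t     uNewRsp;
 *    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *    {
 *        ... copy out and validate the frame contents ...
 *        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *    }
 *    if (rcStrict == VINF_SUCCESS)
 *        pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */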
10828
10829
10830/**
10831 * Fetches a system table byte.
10832 *
10833 * @returns Strict VBox status code.
10834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10835 * @param pbDst Where to return the byte.
10836 * @param iSegReg The index of the segment register to use for
10837 * this access. The base and limits are checked.
10838 * @param GCPtrMem The address of the guest memory.
10839 */
10840IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10841{
10842 /* The lazy approach for now... */
10843 uint8_t const *pbSrc;
10844 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10845 if (rc == VINF_SUCCESS)
10846 {
10847 *pbDst = *pbSrc;
10848 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10849 }
10850 return rc;
10851}
10852
10853
10854/**
10855 * Fetches a system table word.
10856 *
10857 * @returns Strict VBox status code.
10858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10859 * @param pu16Dst Where to return the word.
10860 * @param iSegReg The index of the segment register to use for
10861 * this access. The base and limits are checked.
10862 * @param GCPtrMem The address of the guest memory.
10863 */
10864IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10865{
10866 /* The lazy approach for now... */
10867 uint16_t const *pu16Src;
10868 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10869 if (rc == VINF_SUCCESS)
10870 {
10871 *pu16Dst = *pu16Src;
10872 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10873 }
10874 return rc;
10875}
10876
10877
10878/**
10879 * Fetches a system table dword.
10880 *
10881 * @returns Strict VBox status code.
10882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10883 * @param pu32Dst Where to return the dword.
10884 * @param iSegReg The index of the segment register to use for
10885 * this access. The base and limits are checked.
10886 * @param GCPtrMem The address of the guest memory.
10887 */
10888IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10889{
10890 /* The lazy approach for now... */
10891 uint32_t const *pu32Src;
10892 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10893 if (rc == VINF_SUCCESS)
10894 {
10895 *pu32Dst = *pu32Src;
10896 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10897 }
10898 return rc;
10899}
10900
10901
10902/**
10903 * Fetches a system table qword.
10904 *
10905 * @returns Strict VBox status code.
10906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10907 * @param pu64Dst Where to return the qword.
10908 * @param iSegReg The index of the segment register to use for
10909 * this access. The base and limits are checked.
10910 * @param GCPtrMem The address of the guest memory.
10911 */
10912IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10913{
10914 /* The lazy approach for now... */
10915 uint64_t const *pu64Src;
10916 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10917 if (rc == VINF_SUCCESS)
10918 {
10919 *pu64Dst = *pu64Src;
10920 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10921 }
10922 return rc;
10923}
10924
10925
10926/**
10927 * Fetches a descriptor table entry with caller specified error code.
10928 *
10929 * @returns Strict VBox status code.
10930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10931 * @param pDesc Where to return the descriptor table entry.
10932 * @param uSel The selector which table entry to fetch.
10933 * @param uXcpt The exception to raise on table lookup error.
10934 * @param uErrorCode The error code associated with the exception.
10935 */
10936IEM_STATIC VBOXSTRICTRC
10937iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10938{
10939 AssertPtr(pDesc);
10940 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10941
10942 /** @todo did the 286 require all 8 bytes to be accessible? */
10943 /*
10944 * Get the selector table base and check bounds.
10945 */
10946 RTGCPTR GCPtrBase;
10947 if (uSel & X86_SEL_LDT)
10948 {
10949 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10950 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10951 {
10952 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10953 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10954 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10955 uErrorCode, 0);
10956 }
10957
10958 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10959 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10960 }
10961 else
10962 {
10963 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10964 {
10965 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10966 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10967 uErrorCode, 0);
10968 }
10969 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10970 }
10971
10972 /*
10973 * Read the legacy descriptor and maybe the long mode extensions if
10974 * required.
10975 */
10976 VBOXSTRICTRC rcStrict;
10977 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10978 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10979 else
10980 {
10981 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10982 if (rcStrict == VINF_SUCCESS)
10983 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10984 if (rcStrict == VINF_SUCCESS)
10985 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10986 if (rcStrict == VINF_SUCCESS)
10987 pDesc->Legacy.au16[3] = 0;
10988 else
10989 return rcStrict;
10990 }
10991
10992 if (rcStrict == VINF_SUCCESS)
10993 {
10994 if ( !IEM_IS_LONG_MODE(pVCpu)
10995 || pDesc->Legacy.Gen.u1DescType)
10996 pDesc->Long.au64[1] = 0;
10997 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10998 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10999 else
11000 {
11001 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11002 /** @todo is this the right exception? */
11003 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11004 }
11005 }
11006 return rcStrict;
11007}
11008
11009
11010/**
11011 * Fetches a descriptor table entry.
11012 *
11013 * @returns Strict VBox status code.
11014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11015 * @param pDesc Where to return the descriptor table entry.
11016 * @param uSel The selector which table entry to fetch.
11017 * @param uXcpt The exception to raise on table lookup error.
11018 */
11019IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11020{
11021 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11022}
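
/*
 * Worked example (for illustration): a selector value of 0x0010 has RPL=0
 * (bits 0-1), TI=0 (bit 2, i.e. GDT) and index 2 (bits 3-15). The bounds
 * check above tests (0x0010 | X86_SEL_RPL_LDT) = 0x0017 against gdtr.cbGdt,
 * and the 8-byte descriptor is then read from gdtr.pGdt + (0x0010 & X86_SEL_MASK)
 * = gdtr.pGdt + 0x10 via iemMemFetchSysU64.
 */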
11023
11024
11025/**
11026 * Fakes a long mode stack selector for SS = 0.
11027 *
11028 * @param pDescSs Where to return the fake stack descriptor.
11029 * @param uDpl The DPL we want.
11030 */
11031IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11032{
11033 pDescSs->Long.au64[0] = 0;
11034 pDescSs->Long.au64[1] = 0;
11035 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11036 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11037 pDescSs->Long.Gen.u2Dpl = uDpl;
11038 pDescSs->Long.Gen.u1Present = 1;
11039 pDescSs->Long.Gen.u1Long = 1;
11040}
11041
11042
11043/**
11044 * Marks the selector descriptor as accessed (only non-system descriptors).
11045 *
11046 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11047 * will therefore skip the limit checks.
11048 *
11049 * @returns Strict VBox status code.
11050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11051 * @param uSel The selector.
11052 */
11053IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11054{
11055 /*
11056 * Get the selector table base and calculate the entry address.
11057 */
11058 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11059 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11060 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11061 GCPtr += uSel & X86_SEL_MASK;
11062
11063 /*
11064 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11065 * ugly stuff to avoid this. This will make sure it's an atomic access
11066 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11067 */
11068 VBOXSTRICTRC rcStrict;
11069 uint32_t volatile *pu32;
11070 if ((GCPtr & 3) == 0)
11071 {
11072 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11073 GCPtr += 2 + 2;
11074 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11075 if (rcStrict != VINF_SUCCESS)
11076 return rcStrict;
11077 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11078 }
11079 else
11080 {
11081 /* The misaligned GDT/LDT case, map the whole thing. */
11082 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11083 if (rcStrict != VINF_SUCCESS)
11084 return rcStrict;
11085 switch ((uintptr_t)pu32 & 3)
11086 {
11087 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11088 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11089 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11090 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11091 }
11092 }
11093
11094 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11095}
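
/*
 * Worked example (for illustration): the accessed flag is bit 40 of the
 * 8-byte descriptor. In the aligned case the mapping starts at descriptor
 * byte 4 (GCPtr += 2 + 2), so the flag is bit 40 - 32 = 8 of the mapped
 * dword. In the misaligned case the whole descriptor is mapped and the
 * switch re-aligns the host pointer by whole bytes, subtracting 8 bits per
 * byte skipped; e.g. for ((uintptr_t)pu32 & 3) == 1 the base becomes
 * pu32 + 3 and the flag is bit 40 - 24 = 16.
 */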
11096
11097/** @} */
11098
11099
11100/*
11101 * Include the C/C++ implementation of the instructions.
11102 */
11103#include "IEMAllCImpl.cpp.h"
11104
11105
11106
11107/** @name "Microcode" macros.
11108 *
11109 * The idea is that we should be able to use the same code to interpret
11110 * instructions as well as recompiler instructions. Thus this obfuscation.
11111 *
11112 * @{
11113 */
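
/*
 * Illustrative sketch (not an actual decoder body): a 32-bit register to
 * register addition, ignoring the EFLAGS update for brevity, could be
 * expressed with these macros roughly as follows; in the interpreter the
 * macros expand to plain C statements, while a recompiler could translate
 * the very same sequence into generated code.
 *
 *    IEM_MC_BEGIN(0, 1);
 *    IEM_MC_LOCAL(uint32_t, u32Src);
 *    IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xBX);
 *    IEM_MC_ADD_GREG_U32(X86_GREG_xAX, u32Src);
 *    IEM_MC_ADVANCE_RIP();
 *    IEM_MC_END();
 */
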
11114#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11115#define IEM_MC_END() }
11116#define IEM_MC_PAUSE() do {} while (0)
11117#define IEM_MC_CONTINUE() do {} while (0)
11118
11119/** Internal macro. */
11120#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11121 do \
11122 { \
11123 VBOXSTRICTRC rcStrict2 = a_Expr; \
11124 if (rcStrict2 != VINF_SUCCESS) \
11125 return rcStrict2; \
11126 } while (0)
11127
11128
11129#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11130#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11131#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11132#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11133#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11134#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11135#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11136#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11137#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11138 do { \
11139 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11140 return iemRaiseDeviceNotAvailable(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11143 do { \
11144 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11145 return iemRaiseDeviceNotAvailable(pVCpu); \
11146 } while (0)
11147#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11148 do { \
11149 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11150 return iemRaiseMathFault(pVCpu); \
11151 } while (0)
11152#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11153 do { \
11154 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11155 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11156 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11157 return iemRaiseUndefinedOpcode(pVCpu); \
11158 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11159 return iemRaiseDeviceNotAvailable(pVCpu); \
11160 } while (0)
11161#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11162 do { \
11163 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11164 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11165 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11166 return iemRaiseUndefinedOpcode(pVCpu); \
11167 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11168 return iemRaiseDeviceNotAvailable(pVCpu); \
11169 } while (0)
11170#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11171 do { \
11172 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11173 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11174 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11175 return iemRaiseUndefinedOpcode(pVCpu); \
11176 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11177 return iemRaiseDeviceNotAvailable(pVCpu); \
11178 } while (0)
11179#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11180 do { \
11181 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11182 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11183 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11184 return iemRaiseUndefinedOpcode(pVCpu); \
11185 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11186 return iemRaiseDeviceNotAvailable(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11189 do { \
11190 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11191 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11192 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11195 return iemRaiseDeviceNotAvailable(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11198 do { \
11199 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11200 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11207 do { \
11208 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11209 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11210 return iemRaiseUndefinedOpcode(pVCpu); \
11211 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11212 return iemRaiseDeviceNotAvailable(pVCpu); \
11213 } while (0)
11214#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11215 do { \
11216 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11217 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11218 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11219 return iemRaiseUndefinedOpcode(pVCpu); \
11220 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11221 return iemRaiseDeviceNotAvailable(pVCpu); \
11222 } while (0)
11223#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11224 do { \
11225 if (pVCpu->iem.s.uCpl != 0) \
11226 return iemRaiseGeneralProtectionFault0(pVCpu); \
11227 } while (0)
11228#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11229 do { \
11230 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11231 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11234 do { \
11235 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11236 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11237 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11238 return iemRaiseUndefinedOpcode(pVCpu); \
11239 } while (0)
11240#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11241 do { \
11242 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11243 return iemRaiseGeneralProtectionFault0(pVCpu); \
11244 } while (0)
11245
11246
11247#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11248#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11249#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11250#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11251#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11252#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11253#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11254 uint32_t a_Name; \
11255 uint32_t *a_pName = &a_Name
11256#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11257 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11258
11259#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11260#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11261
11262#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11268#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11269#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11270#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11271#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11272#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11273#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11274#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11275#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11276#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11277#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11278#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11279#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11280 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11281 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11282 } while (0)
11283#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11284 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11285 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11286 } while (0)
11287#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11288 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11289 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11290 } while (0)
11291/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11292#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11293 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11294 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11295 } while (0)
11296#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11297 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11298 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11299 } while (0)
11300/** @note Not for IOPL or IF testing or modification. */
11301#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11302#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11303#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11304#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11305
11306#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11307#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11308#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11309#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11310#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11311#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11312#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11313#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11314#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11315#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11316/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11317#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11318 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11319 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11320 } while (0)
11321#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11322 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11323 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11324 } while (0)
11325#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11326 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11327
11328
11329#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11330#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11331/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11332 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11333#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11334#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11335/** @note Not for IOPL or IF testing or modification. */
11336#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11337
11338#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11339#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11340#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11341 do { \
11342 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11343 *pu32Reg += (a_u32Value); \
11344 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11345 } while (0)
11346#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11347
11348#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11349#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11350#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11351 do { \
11352 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11353 *pu32Reg -= (a_u32Value); \
11354 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11355 } while (0)
11356#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11357#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11358
11359#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11360#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11361#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11362#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11363#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11364#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11365#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11366
11367#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11368#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11369#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11370#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11371
11372#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11373#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11374#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11375
11376#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11377#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11378#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11379
11380#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11381#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11382#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11383
11384#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11385#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11386#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11387
11388#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11389
11390#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11391
11392#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11393#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11394#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11395 do { \
11396 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11397 *pu32Reg &= (a_u32Value); \
11398 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11399 } while (0)
11400#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11401
11402#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11403#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11404#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11405 do { \
11406 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11407 *pu32Reg |= (a_u32Value); \
11408 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11409 } while (0)
11410#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11411
11412
11413/** @note Not for IOPL or IF modification. */
11414#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11415/** @note Not for IOPL or IF modification. */
11416#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11417/** @note Not for IOPL or IF modification. */
11418#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11419
11420#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11421
11422/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11423#define IEM_MC_FPU_TO_MMX_MODE() do { \
11424 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11425 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11426 } while (0)
11427
11428/** Switches the FPU state from MMX mode (FTW=0xffff). */
11429#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11430 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11431 } while (0)
11432
11433#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11434 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11435#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11436 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11437#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11438 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11439 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11440 } while (0)
11441#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11442 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11443 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11444 } while (0)
11445#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11446 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11447#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11448 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11449#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11450 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11451
11452#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11453 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11454 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11455 } while (0)
11456#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11457 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11458#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11459 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11460#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11461 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11462#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11463 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11464 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11465 } while (0)
11466#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11467 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11468#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11469 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11470 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11471 } while (0)
11472#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11473 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11474#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11475 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11476 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11477 } while (0)
11478#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11479 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11480#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11481 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11482#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11483 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11484#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11485 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11486#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11487 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11488 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11489 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11490 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11491 } while (0)
11492
11493#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11494 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11495 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11496 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11497 } while (0)
11498#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11499 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11500 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11501 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11502 } while (0)
11503#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11504 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11505 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11506 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11507 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11508 } while (0)
11509#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11510 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11511 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11512 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11513 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11514 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11515 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11516 } while (0)
11517
11518#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11519#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11520 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11521 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11523 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11524 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11525 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11526 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11527 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11528 } while (0)
11529#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11530 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11531 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11533 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11535 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11536 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11537 } while (0)
11538#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11539 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11540 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11546 } while (0)
11547#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11555 } while (0)
11556
11557#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11558 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11559#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11560 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11561#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11562 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11563#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11564 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11565 uintptr_t const iYRegTmp = (a_iYReg); \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11568 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11569 } while (0)
11570
11571#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11572 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11579 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11580 } while (0)
11581#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11582 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11583 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11584 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11592 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11593 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11596 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11598 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11599 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11600 } while (0)
11601
11602#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11603 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11604 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11605 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11606 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11607 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11613 } while (0)
11614#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11615 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11616 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11617 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11618 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11623 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11624 } while (0)
11625#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11626 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11627 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11628 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11629 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11631 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11633 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11634 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11635 } while (0)
11636#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11637 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11638 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11639 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11641 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11643 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11644 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11645 } while (0)
11646
11647#ifndef IEM_WITH_SETJMP
11648# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11652# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11654#else
11655# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11656 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11657# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11658 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11659# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11660 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11661#endif
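
/*
 * Note (illustration): both flavours look the same to the instruction
 * bodies. Without IEM_WITH_SETJMP a fetch expands to a status code check,
 * roughly
 *    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Dst, iSeg, GCPtrMem));
 * whereas with IEM_WITH_SETJMP the *Jmp fetchers return the value directly
 * and, as the name suggests, bail out of the instruction via longjmp on
 * failure:
 *    u8Dst = iemMemFetchDataU8Jmp(pVCpu, iSeg, GCPtrMem);
 */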
11662
11663#ifndef IEM_WITH_SETJMP
11664# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11668# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11670#else
11671# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11674 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11675# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11676 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11677#endif
11678
11679#ifndef IEM_WITH_SETJMP
11680# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11684# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11686#else
11687# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11690 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11691# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11692 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11693#endif
11694
11695#ifdef SOME_UNUSED_FUNCTION
11696# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11698#endif
11699
11700#ifndef IEM_WITH_SETJMP
11701# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11705# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11707# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11709#else
11710# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11713 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11714# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11715 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11716# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11717 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11718#endif
11719
11720#ifndef IEM_WITH_SETJMP
11721# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11723# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11727#else
11728# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11729 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11730# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11733 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11734#endif
11735
11736#ifndef IEM_WITH_SETJMP
11737# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11739# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11741#else
11742# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11743 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11744# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11745 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11746#endif
11747
11748#ifndef IEM_WITH_SETJMP
11749# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11753#else
11754# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11755 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11756# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11757 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11758#endif
11759
11760
11761
11762#ifndef IEM_WITH_SETJMP
11763# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11764 do { \
11765 uint8_t u8Tmp; \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11767 (a_u16Dst) = u8Tmp; \
11768 } while (0)
11769# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11770 do { \
11771 uint8_t u8Tmp; \
11772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11773 (a_u32Dst) = u8Tmp; \
11774 } while (0)
11775# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11776 do { \
11777 uint8_t u8Tmp; \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11779 (a_u64Dst) = u8Tmp; \
11780 } while (0)
11781# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11782 do { \
11783 uint16_t u16Tmp; \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11785 (a_u32Dst) = u16Tmp; \
11786 } while (0)
11787# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11788 do { \
11789 uint16_t u16Tmp; \
11790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11791 (a_u64Dst) = u16Tmp; \
11792 } while (0)
11793# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11794 do { \
11795 uint32_t u32Tmp; \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11797 (a_u64Dst) = u32Tmp; \
11798 } while (0)
11799#else /* IEM_WITH_SETJMP */
11800# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11809 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11810# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11812#endif /* IEM_WITH_SETJMP */
11813
11814#ifndef IEM_WITH_SETJMP
11815# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11816 do { \
11817 uint8_t u8Tmp; \
11818 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11819 (a_u16Dst) = (int8_t)u8Tmp; \
11820 } while (0)
11821# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11822 do { \
11823 uint8_t u8Tmp; \
11824 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11825 (a_u32Dst) = (int8_t)u8Tmp; \
11826 } while (0)
11827# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11828 do { \
11829 uint8_t u8Tmp; \
11830 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11831 (a_u64Dst) = (int8_t)u8Tmp; \
11832 } while (0)
11833# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11834 do { \
11835 uint16_t u16Tmp; \
11836 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11837 (a_u32Dst) = (int16_t)u16Tmp; \
11838 } while (0)
11839# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11840 do { \
11841 uint16_t u16Tmp; \
11842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11843 (a_u64Dst) = (int16_t)u16Tmp; \
11844 } while (0)
11845# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11846 do { \
11847 uint32_t u32Tmp; \
11848 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11849 (a_u64Dst) = (int32_t)u32Tmp; \
11850 } while (0)
11851#else /* IEM_WITH_SETJMP */
11852# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11857 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11858# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11859 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11860# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11861 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11862# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11863 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11864#endif /* IEM_WITH_SETJMP */
11865
11866#ifndef IEM_WITH_SETJMP
11867# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11869# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11871# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11873# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11875#else
11876# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11877 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11878# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11879 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11880# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11881 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11882# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11883 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11884#endif
11885
11886#ifndef IEM_WITH_SETJMP
11887# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11889# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11891# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11893# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11895#else
11896# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11897 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11898# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11899 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11900# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11901 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11902# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11903 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11904#endif
11905
11906#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11907#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11908#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11909#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11910#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11911#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11912#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11913 do { \
11914 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11915 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11916 } while (0)
11917
11918#ifndef IEM_WITH_SETJMP
11919# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11920 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11921# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11923#else
11924# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11925 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11926# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11927 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11928#endif
11929
11930#ifndef IEM_WITH_SETJMP
11931# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11933# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11935#else
11936# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11937 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11938# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11939 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11940#endif
11941
11942
11943#define IEM_MC_PUSH_U16(a_u16Value) \
11944 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11945#define IEM_MC_PUSH_U32(a_u32Value) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11947#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11948 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11949#define IEM_MC_PUSH_U64(a_u64Value) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11951
11952#define IEM_MC_POP_U16(a_pu16Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11954#define IEM_MC_POP_U32(a_pu32Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11956#define IEM_MC_POP_U64(a_pu64Value) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11958
11959/** Maps guest memory for direct or bounce buffered access.
11960 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11961 * @remarks May return.
11962 */
11963#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11965
11966/** Maps guest memory for direct or bounce buffered access.
11967 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11968 * @remarks May return.
11969 */
11970#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11971 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11972
11973/** Commits the memory and unmaps the guest memory.
11974 * @remarks May return.
11975 */
11976#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11977 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
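/* Illustrative sketch (not part of the original source): a read-modify-write
 * operand body typically maps the guest memory, lets the operand worker modify
 * it through the returned pointer and then commits.  The local names below are
 * hypothetical and IEM_ACCESS_DATA_RW is assumed to be the usual read+write
 * data access flag combination; GCPtrEffDst is the previously calculated
 * effective address:
 *
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     ... let the operand implementation update *pu16Dst ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */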
11978
11979/** Commits the memory and unmaps the guest memory unless the FPU status word
11980 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11981 * would cause FLD not to store.
11982 *
11983 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11984 * store, while \#P will not.
11985 *
11986 * @remarks May in theory return - for now.
11987 */
11988#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11989 do { \
11990 if ( !(a_u16FSW & X86_FSW_ES) \
11991 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11992 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11993 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11994 } while (0)
11995
11996/** Calculates the effective address from R/M. */
11997#ifndef IEM_WITH_SETJMP
11998# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11999 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12000#else
12001# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12002 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12003#endif
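/* Illustrative sketch (not part of the original source): the memory form of a
 * hypothetical "load word into register" instruction would use the macro like
 * this (ModR/M register decoding and prefix checks elided; names assumed):
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U16(iGReg, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */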
12004
12005#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12006#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12007#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12008#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12009#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12010#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12011#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12012
12013/**
12014 * Defers the rest of the instruction emulation to a C implementation routine
12015 * and returns, only taking the standard parameters.
12016 *
12017 * @param a_pfnCImpl The pointer to the C routine.
12018 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12019 */
12020#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12021
12022/**
12023 * Defers the rest of instruction emulation to a C implementation routine and
12024 * returns, taking one argument in addition to the standard ones.
12025 *
12026 * @param a_pfnCImpl The pointer to the C routine.
12027 * @param a0 The argument.
12028 */
12029#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
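/* Illustrative sketch (not part of the original source): because these macros
 * expand to a return statement, execution leaves the IEM_MC block at the call
 * site even though IEM_MC_END() still follows syntactically.  The worker name
 * below is hypothetical:
 *
 *     IEM_MC_BEGIN(1, 0);
 *     IEM_MC_ARG(uint16_t, u16Value, 0);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_CALL_CIMPL_1(iemCImpl_SomeWorker, u16Value);
 *     IEM_MC_END();
 */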
12030
12031/**
12032 * Defers the rest of the instruction emulation to a C implementation routine
12033 * and returns, taking two arguments in addition to the standard ones.
12034 *
12035 * @param a_pfnCImpl The pointer to the C routine.
12036 * @param a0 The first extra argument.
12037 * @param a1 The second extra argument.
12038 */
12039#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12040
12041/**
12042 * Defers the rest of the instruction emulation to a C implementation routine
12043 * and returns, taking three arguments in addition to the standard ones.
12044 *
12045 * @param a_pfnCImpl The pointer to the C routine.
12046 * @param a0 The first extra argument.
12047 * @param a1 The second extra argument.
12048 * @param a2 The third extra argument.
12049 */
12050#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12051
12052/**
12053 * Defers the rest of the instruction emulation to a C implementation routine
12054 * and returns, taking four arguments in addition to the standard ones.
12055 *
12056 * @param a_pfnCImpl The pointer to the C routine.
12057 * @param a0 The first extra argument.
12058 * @param a1 The second extra argument.
12059 * @param a2 The third extra argument.
12060 * @param a3 The fourth extra argument.
12061 */
12062#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12063
12064/**
12065 * Defers the rest of the instruction emulation to a C implementation routine
12066 * and returns, taking five arguments in addition to the standard ones.
12067 *
12068 * @param a_pfnCImpl The pointer to the C routine.
12069 * @param a0 The first extra argument.
12070 * @param a1 The second extra argument.
12071 * @param a2 The third extra argument.
12072 * @param a3 The fourth extra argument.
12073 * @param a4 The fifth extra argument.
12074 */
12075#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12076
12077/**
12078 * Defers the entire instruction emulation to a C implementation routine and
12079 * returns, only taking the standard parameters.
12080 *
12081 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12082 *
12083 * @param a_pfnCImpl The pointer to the C routine.
12084 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12085 */
12086#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12087
12088/**
12089 * Defers the entire instruction emulation to a C implementation routine and
12090 * returns, taking one argument in addition to the standard ones.
12091 *
12092 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12093 *
12094 * @param a_pfnCImpl The pointer to the C routine.
12095 * @param a0 The argument.
12096 */
12097#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12098
12099/**
12100 * Defers the entire instruction emulation to a C implementation routine and
12101 * returns, taking two arguments in addition to the standard ones.
12102 *
12103 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12104 *
12105 * @param a_pfnCImpl The pointer to the C routine.
12106 * @param a0 The first extra argument.
12107 * @param a1 The second extra argument.
12108 */
12109#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12110
12111/**
12112 * Defers the entire instruction emulation to a C implementation routine and
12113 * returns, taking three arguments in addition to the standard ones.
12114 *
12115 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12116 *
12117 * @param a_pfnCImpl The pointer to the C routine.
12118 * @param a0 The first extra argument.
12119 * @param a1 The second extra argument.
12120 * @param a2 The third extra argument.
12121 */
12122#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
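/* Illustrative sketch (not part of the original source): instructions that are
 * implemented entirely in C are decoded and handed off directly, with no
 * IEM_MC_BEGIN/IEM_MC_END block around the call, e.g.:
 *
 *     FNIEMOP_DEF(iemOp_SomeSimpleInstr)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeSimpleWorker);
 *     }
 *
 * The opcode function and worker names above are made up for the example.
 */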
12123
12124/**
12125 * Calls a FPU assembly implementation taking one visible argument.
12126 *
12127 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12128 * @param a0 The first extra argument.
12129 */
12130#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12131 do { \
12132 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12133 } while (0)
12134
12135/**
12136 * Calls a FPU assembly implementation taking two visible arguments.
12137 *
12138 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12139 * @param a0 The first extra argument.
12140 * @param a1 The second extra argument.
12141 */
12142#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12143 do { \
12144 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12145 } while (0)
12146
12147/**
12148 * Calls a FPU assembly implementation taking three visible arguments.
12149 *
12150 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12151 * @param a0 The first extra argument.
12152 * @param a1 The second extra argument.
12153 * @param a2 The third extra argument.
12154 */
12155#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12156 do { \
12157 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12158 } while (0)
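/* Illustrative sketch (not part of the original source): a typical two operand
 * FPU instruction pairs these call macros with the stack register checks and
 * result macros defined further down.  The worker and local names are
 * assumptions:
 *
 *     IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *     IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 */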
12159
12160#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12161 do { \
12162 (a_FpuData).FSW = (a_FSW); \
12163 (a_FpuData).r80Result = *(a_pr80Value); \
12164 } while (0)
12165
12166/** Pushes FPU result onto the stack. */
12167#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12168 iemFpuPushResult(pVCpu, &a_FpuData)
12169/** Pushes FPU result onto the stack and sets the FPUDP. */
12170#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12171 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12172
12173/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12174#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12175 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12176
12177/** Stores FPU result in a stack register. */
12178#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12179 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12180/** Stores FPU result in a stack register and pops the stack. */
12181#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12182 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12183/** Stores FPU result in a stack register and sets the FPUDP. */
12184#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12185 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12186/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12187 * stack. */
12188#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12189 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12190
12191/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12192#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12193 iemFpuUpdateOpcodeAndIp(pVCpu)
12194/** Free a stack register (for FFREE and FFREEP). */
12195#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12196 iemFpuStackFree(pVCpu, a_iStReg)
12197/** Increment the FPU stack pointer. */
12198#define IEM_MC_FPU_STACK_INC_TOP() \
12199 iemFpuStackIncTop(pVCpu)
12200/** Decrement the FPU stack pointer. */
12201#define IEM_MC_FPU_STACK_DEC_TOP() \
12202 iemFpuStackDecTop(pVCpu)
12203
12204/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12205#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12206 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12207/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12208#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12209 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12210/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12211#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12212 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12213/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12214#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12215 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12216/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12217 * stack. */
12218#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12219 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12220/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12221#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12222 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12223
12224/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12225#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12226 iemFpuStackUnderflow(pVCpu, a_iStDst)
12227/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12228 * stack. */
12229#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12230 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12231/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12232 * FPUDS. */
12233#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12234 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12235/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12236 * FPUDS. Pops stack. */
12237#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12238 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12239/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12240 * stack twice. */
12241#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12242 iemFpuStackUnderflowThenPopPop(pVCpu)
12243/** Raises a FPU stack underflow exception for an instruction pushing a result
12244 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12245#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12246 iemFpuStackPushUnderflow(pVCpu)
12247/** Raises a FPU stack underflow exception for an instruction pushing a result
12248 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12249#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12250 iemFpuStackPushUnderflowTwo(pVCpu)
12251
12252/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12253 * FPUIP, FPUCS and FOP. */
12254#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12255 iemFpuStackPushOverflow(pVCpu)
12256/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12257 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12258#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12259 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12260/** Prepares for using the FPU state.
12261 * Ensures that we can use the host FPU in the current context (RC+R0).
12262 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12263#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12264/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12265#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12266/** Actualizes the guest FPU state so it can be accessed and modified. */
12267#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12268
12269/** Prepares for using the SSE state.
12270 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12271 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12272#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12273/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12274#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12275/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12276#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12277
12278/** Prepares for using the AVX state.
12279 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12280 * Ensures the guest AVX state in the CPUMCTX is up to date.
12281 * @note This will include the AVX512 state too when support for it is added
12282 * due to the zero-extending feature of VEX instructions. */
12283#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12284/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12285#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12286/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12287#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
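/* Illustrative note (not part of the original source): the prepare/actualize
 * macros above are invoked before any FPU/SSE/AVX register is touched in an
 * IEM_MC block, typically right after the exception checks, e.g.:
 *
 *     IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pu128Dst, iXRegDst);
 *
 * The register index decoding is elided and the local names are assumptions.
 */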
12288
12289/**
12290 * Calls a MMX assembly implementation taking two visible arguments.
12291 *
12292 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12293 * @param a0 The first extra argument.
12294 * @param a1 The second extra argument.
12295 */
12296#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12297 do { \
12298 IEM_MC_PREPARE_FPU_USAGE(); \
12299 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12300 } while (0)
12301
12302/**
12303 * Calls a MMX assembly implementation taking three visible arguments.
12304 *
12305 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12306 * @param a0 The first extra argument.
12307 * @param a1 The second extra argument.
12308 * @param a2 The third extra argument.
12309 */
12310#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12311 do { \
12312 IEM_MC_PREPARE_FPU_USAGE(); \
12313 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12314 } while (0)
12315
12316
12317/**
12318 * Calls a SSE assembly implementation taking two visible arguments.
12319 *
12320 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12321 * @param a0 The first extra argument.
12322 * @param a1 The second extra argument.
12323 */
12324#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12325 do { \
12326 IEM_MC_PREPARE_SSE_USAGE(); \
12327 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12328 } while (0)
12329
12330/**
12331 * Calls a SSE assembly implementation taking three visible arguments.
12332 *
12333 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12334 * @param a0 The first extra argument.
12335 * @param a1 The second extra argument.
12336 * @param a2 The third extra argument.
12337 */
12338#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12339 do { \
12340 IEM_MC_PREPARE_SSE_USAGE(); \
12341 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12342 } while (0)
12343
12344
12345/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12346 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12347#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12348 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12349
12350/**
12351 * Calls a AVX assembly implementation taking two visible arguments.
12352 *
12353 * There is one implicit zero'th argument, a pointer to the extended state.
12354 *
12355 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12356 * @param a1 The first extra argument.
12357 * @param a2 The second extra argument.
12358 */
12359#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12360 do { \
12361 IEM_MC_PREPARE_AVX_USAGE(); \
12362 a_pfnAImpl(pXState, (a1), (a2)); \
12363 } while (0)
12364
12365/**
12366 * Calls a AVX assembly implementation taking three visible arguments.
12367 *
12368 * There is one implicit zero'th argument, a pointer to the extended state.
12369 *
12370 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12371 * @param a1 The first extra argument.
12372 * @param a2 The second extra argument.
12373 * @param a3 The third extra argument.
12374 */
12375#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12376 do { \
12377 IEM_MC_PREPARE_AVX_USAGE(); \
12378 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12379 } while (0)
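/* Illustrative sketch (not part of the original source): the implicit pXState
 * argument declared by IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() occupies argument
 * slot 0, so the visible arguments start at slot 1.  The worker name is
 * hypothetical:
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *     IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker, iYRegDst, iYRegSrc);
 *     IEM_MC_END();
 */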
12380
12381/** @note Not for IOPL or IF testing. */
12382#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12385/** @note Not for IOPL or IF testing. */
12386#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12387/** @note Not for IOPL or IF testing. */
12388#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12389/** @note Not for IOPL or IF testing. */
12390#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12391 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12392 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12393/** @note Not for IOPL or IF testing. */
12394#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12395 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12396 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12397/** @note Not for IOPL or IF testing. */
12398#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12399 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12400 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12401 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12402/** @note Not for IOPL or IF testing. */
12403#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12404 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12405 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12406 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12407#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12408#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12409#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12410/** @note Not for IOPL or IF testing. */
12411#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12412 if ( pVCpu->cpum.GstCtx.cx != 0 \
12413 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12416 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12417 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12420 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12421 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12424 if ( pVCpu->cpum.GstCtx.cx != 0 \
12425 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12428 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12429 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12430/** @note Not for IOPL or IF testing. */
12431#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12432 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12433 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12434#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12435#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12436
12437#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12438 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12439#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12440 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12441#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12442 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12443#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12444 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12445#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12446 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12447#define IEM_MC_IF_FCW_IM() \
12448 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12449
12450#define IEM_MC_ELSE() } else {
12451#define IEM_MC_ENDIF() } do {} while (0)
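/* Illustrative sketch (not part of the original source): the IF macros open a
 * brace which IEM_MC_ELSE()/IEM_MC_ENDIF() close again, so a conditional jump
 * body reads like this (i8Imm is assumed to have been fetched during decoding):
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 */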
12452
12453/** @} */
12454
12455
12456/** @name Opcode Debug Helpers.
12457 * @{
12458 */
12459#ifdef VBOX_WITH_STATISTICS
12460# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12461#else
12462# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12463#endif
12464
12465#ifdef DEBUG
12466# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12467 do { \
12468 IEMOP_INC_STATS(a_Stats); \
12469 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12470 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12471 } while (0)
12472
12473# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12474 do { \
12475 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12476 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12477 (void)RT_CONCAT(OP_,a_Upper); \
12478 (void)(a_fDisHints); \
12479 (void)(a_fIemHints); \
12480 } while (0)
12481
12482# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12483 do { \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12485 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12486 (void)RT_CONCAT(OP_,a_Upper); \
12487 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12488 (void)(a_fDisHints); \
12489 (void)(a_fIemHints); \
12490 } while (0)
12491
12492# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12493 do { \
12494 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12495 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12496 (void)RT_CONCAT(OP_,a_Upper); \
12497 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12498 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12499 (void)(a_fDisHints); \
12500 (void)(a_fIemHints); \
12501 } while (0)
12502
12503# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12504 do { \
12505 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12506 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12507 (void)RT_CONCAT(OP_,a_Upper); \
12508 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12509 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12510 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12511 (void)(a_fDisHints); \
12512 (void)(a_fIemHints); \
12513 } while (0)
12514
12515# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12516 do { \
12517 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12518 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12519 (void)RT_CONCAT(OP_,a_Upper); \
12520 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12521 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12522 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12523 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12524 (void)(a_fDisHints); \
12525 (void)(a_fIemHints); \
12526 } while (0)
12527
12528#else
12529# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12530
12531# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12532 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12533# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12534 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12535# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12536 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12537# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12539# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12540 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12541
12542#endif
12543
12544#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12545 IEMOP_MNEMONIC0EX(a_Lower, \
12546 #a_Lower, \
12547 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12548#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12549 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12550 #a_Lower " " #a_Op1, \
12551 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12552#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12553 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12554 #a_Lower " " #a_Op1 "," #a_Op2, \
12555 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12556#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12557 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12558 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12559 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12560#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12561 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12562 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12563 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
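/* Illustrative sketch (not part of the original source): a two operand opcode
 * function would typically start with something like
 *
 *     IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the add_Gv_Ev statistics counter, logs "add Gv,Ev" at level 4 in
 * debug builds, and lets the compiler verify that the IEMOPFORM_RM, OP_ADD and
 * OP_PARM_Gv/OP_PARM_Ev constants exist.  The disassembler hint value shown is
 * an assumption for this example.
 */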
12564
12565/** @} */
12566
12567
12568/** @name Opcode Helpers.
12569 * @{
12570 */
12571
12572#ifdef IN_RING3
12573# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12574 do { \
12575 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12576 else \
12577 { \
12578 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12579 return IEMOP_RAISE_INVALID_OPCODE(); \
12580 } \
12581 } while (0)
12582#else
12583# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12584 do { \
12585 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12586 else return IEMOP_RAISE_INVALID_OPCODE(); \
12587 } while (0)
12588#endif
12589
12590/** The instruction requires a 186 or later. */
12591#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12592# define IEMOP_HLP_MIN_186() do { } while (0)
12593#else
12594# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12595#endif
12596
12597/** The instruction requires a 286 or later. */
12598#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12599# define IEMOP_HLP_MIN_286() do { } while (0)
12600#else
12601# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12602#endif
12603
12604/** The instruction requires a 386 or later. */
12605#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12606# define IEMOP_HLP_MIN_386() do { } while (0)
12607#else
12608# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12609#endif
12610
12611/** The instruction requires a 386 or later if the given expression is true. */
12612#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12613# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12614#else
12615# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12616#endif
12617
12618/** The instruction requires a 486 or later. */
12619#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12620# define IEMOP_HLP_MIN_486() do { } while (0)
12621#else
12622# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12623#endif
12624
12625/** The instruction requires a Pentium (586) or later. */
12626#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12627# define IEMOP_HLP_MIN_586() do { } while (0)
12628#else
12629# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12630#endif
12631
12632/** The instruction requires a PentiumPro (686) or later. */
12633#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12634# define IEMOP_HLP_MIN_686() do { } while (0)
12635#else
12636# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12637#endif
12638
12639
12640/** The instruction raises an \#UD in real and V8086 mode. */
12641#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12642 do \
12643 { \
12644 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12645 else return IEMOP_RAISE_INVALID_OPCODE(); \
12646 } while (0)
12647
12648#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12649/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12650 * without using a 64-bit code segment (applicable to all VMX instructions
12651 * except VMCALL).
12652 */
12653#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12654 do \
12655 { \
12656 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12657 && ( !IEM_IS_LONG_MODE(pVCpu) \
12658 || IEM_IS_64BIT_CODE(pVCpu))) \
12659 { /* likely */ } \
12660 else \
12661 { \
12662 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12663 { \
12664 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12665 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12666 return IEMOP_RAISE_INVALID_OPCODE(); \
12667 } \
12668 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12669 { \
12670 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12671 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12672 return IEMOP_RAISE_INVALID_OPCODE(); \
12673 } \
12674 } \
12675 } while (0)
12676
12677/** The instruction can only be executed in VMX operation (VMX root mode and
12678 * non-root mode).
12679 *
12680 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12681 */
12682# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12683 do \
12684 { \
12685 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12686 else \
12687 { \
12688 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12689 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12690 return IEMOP_RAISE_INVALID_OPCODE(); \
12691 } \
12692 } while (0)
12693#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12694
12695/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12696 * 64-bit mode. */
12697#define IEMOP_HLP_NO_64BIT() \
12698 do \
12699 { \
12700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12701 return IEMOP_RAISE_INVALID_OPCODE(); \
12702 } while (0)
12703
12704/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12705 * 64-bit mode. */
12706#define IEMOP_HLP_ONLY_64BIT() \
12707 do \
12708 { \
12709 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12710 return IEMOP_RAISE_INVALID_OPCODE(); \
12711 } while (0)
12712
12713/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12714#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12715 do \
12716 { \
12717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12718 iemRecalEffOpSize64Default(pVCpu); \
12719 } while (0)
12720
12721/** The instruction has 64-bit operand size if 64-bit mode. */
12722#define IEMOP_HLP_64BIT_OP_SIZE() \
12723 do \
12724 { \
12725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12726 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12727 } while (0)
12728
12729/** Only a REX prefix immediately preceding the first opcode byte takes
12730 * effect. This macro helps ensure this as well as log bad guest code. */
12731#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12732 do \
12733 { \
12734 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12735 { \
12736 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12737 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12738 pVCpu->iem.s.uRexB = 0; \
12739 pVCpu->iem.s.uRexIndex = 0; \
12740 pVCpu->iem.s.uRexReg = 0; \
12741 iemRecalEffOpSize(pVCpu); \
12742 } \
12743 } while (0)
12744
12745/**
12746 * Done decoding.
12747 */
12748#define IEMOP_HLP_DONE_DECODING() \
12749 do \
12750 { \
12751 /*nothing for now, maybe later... */ \
12752 } while (0)
12753
12754/**
12755 * Done decoding, raise \#UD exception if lock prefix present.
12756 */
12757#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12758 do \
12759 { \
12760 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12761 { /* likely */ } \
12762 else \
12763 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12764 } while (0)
12765
12766
12767/**
12768 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12769 * repnz or size prefixes are present, or if in real or v8086 mode.
12770 */
12771#define IEMOP_HLP_DONE_VEX_DECODING() \
12772 do \
12773 { \
12774 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12775 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12776 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12777 { /* likely */ } \
12778 else \
12779 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12780 } while (0)
12781
12782/**
12783 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12784 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12785 */
12786#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12787 do \
12788 { \
12789 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12790 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12791 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12792 && pVCpu->iem.s.uVexLength == 0)) \
12793 { /* likely */ } \
12794 else \
12795 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12796 } while (0)
12797
12798
12799/**
12800 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12801 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12802 * register 0, or if in real or v8086 mode.
12803 */
12804#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12805 do \
12806 { \
12807 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12808 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12809 && !pVCpu->iem.s.uVex3rdReg \
12810 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12811 { /* likely */ } \
12812 else \
12813 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12814 } while (0)
12815
12816/**
12817 * Done decoding VEX, no V, L=0.
12818 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12819 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12820 */
12821#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12822 do \
12823 { \
12824 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12825 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12826 && pVCpu->iem.s.uVexLength == 0 \
12827 && pVCpu->iem.s.uVex3rdReg == 0 \
12828 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12829 { /* likely */ } \
12830 else \
12831 return IEMOP_RAISE_INVALID_OPCODE(); \
12832 } while (0)
12833
12834#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12835 do \
12836 { \
12837 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12838 { /* likely */ } \
12839 else \
12840 { \
12841 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12842 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12843 } \
12844 } while (0)
12845#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12846 do \
12847 { \
12848 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12849 { /* likely */ } \
12850 else \
12851 { \
12852 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12853 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12854 } \
12855 } while (0)
12856
12857/**
12858 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12859 * are present.
12860 */
12861#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12862 do \
12863 { \
12864 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12865 { /* likely */ } \
12866 else \
12867 return IEMOP_RAISE_INVALID_OPCODE(); \
12868 } while (0)
12869
12870/**
12871 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12872 * prefixes are present.
12873 */
12874#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12875 do \
12876 { \
12877 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12878 { /* likely */ } \
12879 else \
12880 return IEMOP_RAISE_INVALID_OPCODE(); \
12881 } while (0)
12882
12883
12884/**
12885 * Calculates the effective address of a ModR/M memory operand.
12886 *
12887 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12888 *
12889 * @return Strict VBox status code.
12890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12891 * @param bRm The ModRM byte.
12892 * @param cbImm The size of any immediate following the
12893 * effective address opcode bytes. Important for
12894 * RIP relative addressing.
12895 * @param pGCPtrEff Where to return the effective address.
12896 */
12897IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12898{
12899 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12900# define SET_SS_DEF() \
12901 do \
12902 { \
12903 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12904 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12905 } while (0)
12906
12907 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12908 {
12909/** @todo Check the effective address size crap! */
12910 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12911 {
12912 uint16_t u16EffAddr;
12913
12914 /* Handle the disp16 form with no registers first. */
12915 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12916 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12917 else
12918 {
12919 /* Get the displacement. */
12920 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12921 {
12922 case 0: u16EffAddr = 0; break;
12923 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12924 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12925 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12926 }
12927
12928 /* Add the base and index registers to the disp. */
12929 switch (bRm & X86_MODRM_RM_MASK)
12930 {
12931 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12932 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12933 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12934 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12935 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12936 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12937 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12938 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12939 }
12940 }
12941
12942 *pGCPtrEff = u16EffAddr;
12943 }
12944 else
12945 {
12946 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12947 uint32_t u32EffAddr;
12948
12949 /* Handle the disp32 form with no registers first. */
12950 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12951 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12952 else
12953 {
12954 /* Get the register (or SIB) value. */
12955 switch ((bRm & X86_MODRM_RM_MASK))
12956 {
12957 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12958 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12959 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12960 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12961 case 4: /* SIB */
12962 {
12963 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12964
12965 /* Get the index and scale it. */
12966 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12967 {
12968 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12969 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12970 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12971 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12972 case 4: u32EffAddr = 0; /*none */ break;
12973 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12974 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12975 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12976 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12977 }
12978 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12979
12980 /* add base */
12981 switch (bSib & X86_SIB_BASE_MASK)
12982 {
12983 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12984 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12985 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12986 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12987 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12988 case 5:
12989 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12990 {
12991 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12992 SET_SS_DEF();
12993 }
12994 else
12995 {
12996 uint32_t u32Disp;
12997 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12998 u32EffAddr += u32Disp;
12999 }
13000 break;
13001 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13002 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13003 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13004 }
13005 break;
13006 }
13007 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13008 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13009 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13010 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13011 }
13012
13013 /* Get and add the displacement. */
13014 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13015 {
13016 case 0:
13017 break;
13018 case 1:
13019 {
13020 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13021 u32EffAddr += i8Disp;
13022 break;
13023 }
13024 case 2:
13025 {
13026 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13027 u32EffAddr += u32Disp;
13028 break;
13029 }
13030 default:
13031 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13032 }
13033
13034 }
13035 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13036 *pGCPtrEff = u32EffAddr;
13037 else
13038 {
13039 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13040 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13041 }
13042 }
13043 }
13044 else
13045 {
13046 uint64_t u64EffAddr;
13047
13048 /* Handle the rip+disp32 form with no registers first. */
13049 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13050 {
13051 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13052 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13053 }
13054 else
13055 {
13056 /* Get the register (or SIB) value. */
13057 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13058 {
13059 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13060 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13061 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13062 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13063 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13064 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13065 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13066 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13067 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13068 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13069 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13070 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13071 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13072 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13073 /* SIB */
13074 case 4:
13075 case 12:
13076 {
13077 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13078
13079 /* Get the index and scale it. */
13080 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13081 {
13082 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13083 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13084 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13085 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13086 case 4: u64EffAddr = 0; /*none */ break;
13087 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13088 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13089 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13090 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13091 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13092 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13093 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13094 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13095 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13096 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13097 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13098 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13099 }
13100 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13101
13102 /* add base */
13103 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13104 {
13105 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13106 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13107 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13108 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13109 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13110 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13111 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13112 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13113 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13114 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13115 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13116 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13117 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13118 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13119 /* complicated encodings */
13120 case 5:
13121 case 13:
13122 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13123 {
13124 if (!pVCpu->iem.s.uRexB)
13125 {
13126 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13127 SET_SS_DEF();
13128 }
13129 else
13130 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13131 }
13132 else
13133 {
13134 uint32_t u32Disp;
13135 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13136 u64EffAddr += (int32_t)u32Disp;
13137 }
13138 break;
13139 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13140 }
13141 break;
13142 }
13143 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13144 }
13145
13146 /* Get and add the displacement. */
13147 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13148 {
13149 case 0:
13150 break;
13151 case 1:
13152 {
13153 int8_t i8Disp;
13154 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13155 u64EffAddr += i8Disp;
13156 break;
13157 }
13158 case 2:
13159 {
13160 uint32_t u32Disp;
13161 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13162 u64EffAddr += (int32_t)u32Disp;
13163 break;
13164 }
13165 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13166 }
13167
13168 }
13169
13170 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13171 *pGCPtrEff = u64EffAddr;
13172 else
13173 {
13174 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13175 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13176 }
13177 }
13178
13179 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13180 return VINF_SUCCESS;
13181}
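
/*
 * Worked example (values purely illustrative): for a SIB-encoded operand the
 * helpers in this group compute
 *
 *      EffAddr = base + index * 2^scale + disp
 *
 * e.g. with base=rbx=0x1000, index=rsi=0x20, scale=3 (i.e. *8) and an 8-bit
 * displacement of 0x10 the result is 0x1000 + 0x20*8 + 0x10 = 0x1110.  The
 * special mod=0, rm=5 encoding in 64-bit mode instead selects RIP-relative
 * addressing, where the displacement is relative to the end of the whole
 * instruction; that is why cbImm (the size of any trailing immediate) is
 * added together with IEM_GET_INSTR_LEN above.
 */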
13182
13183
13184/**
13185 * Calculates the effective address of a ModR/M memory operand.
13186 *
13187 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13188 *
13189 * @return Strict VBox status code.
13190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13191 * @param bRm The ModRM byte.
13192 * @param cbImm The size of any immediate following the
13193 * effective address opcode bytes. Important for
13194 * RIP relative addressing.
13195 * @param pGCPtrEff Where to return the effective address.
13196 * @param   offRsp              RSP displacement to apply when ESP/RSP is the SIB base register.
13197 */
13198IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13199{
13200    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13201# define SET_SS_DEF() \
13202 do \
13203 { \
13204 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13205 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13206 } while (0)
13207
13208 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13209 {
13210/** @todo Check the effective address size crap! */
13211 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13212 {
13213 uint16_t u16EffAddr;
13214
13215 /* Handle the disp16 form with no registers first. */
13216 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13217 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13218 else
13219 {
13220            /* Get the displacement. */
13221 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13222 {
13223 case 0: u16EffAddr = 0; break;
13224 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13225 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13226 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13227 }
13228
13229 /* Add the base and index registers to the disp. */
13230 switch (bRm & X86_MODRM_RM_MASK)
13231 {
13232 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13233 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13234 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13235 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13236 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13237 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13238 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13239 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13240 }
13241 }
13242
13243 *pGCPtrEff = u16EffAddr;
13244 }
13245 else
13246 {
13247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13248 uint32_t u32EffAddr;
13249
13250 /* Handle the disp32 form with no registers first. */
13251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13252 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13253 else
13254 {
13255 /* Get the register (or SIB) value. */
13256 switch ((bRm & X86_MODRM_RM_MASK))
13257 {
13258 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13259 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13260 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13261 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13262 case 4: /* SIB */
13263 {
13264 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13265
13266 /* Get the index and scale it. */
13267 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13268 {
13269 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13270 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13271 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13272 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13273 case 4: u32EffAddr = 0; /*none */ break;
13274 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13275 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13276 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13277 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13278 }
13279 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13280
13281 /* add base */
13282 switch (bSib & X86_SIB_BASE_MASK)
13283 {
13284 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13285 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13286 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13287 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13288 case 4:
13289 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13290 SET_SS_DEF();
13291 break;
13292 case 5:
13293 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13294 {
13295 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13296 SET_SS_DEF();
13297 }
13298 else
13299 {
13300 uint32_t u32Disp;
13301 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13302 u32EffAddr += u32Disp;
13303 }
13304 break;
13305 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13306 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13307 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13308 }
13309 break;
13310 }
13311 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13312 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13313 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13314 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13315 }
13316
13317 /* Get and add the displacement. */
13318 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13319 {
13320 case 0:
13321 break;
13322 case 1:
13323 {
13324 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13325 u32EffAddr += i8Disp;
13326 break;
13327 }
13328 case 2:
13329 {
13330 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13331 u32EffAddr += u32Disp;
13332 break;
13333 }
13334 default:
13335 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13336 }
13337
13338 }
13339 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13340 *pGCPtrEff = u32EffAddr;
13341 else
13342 {
13343 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13344 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13345 }
13346 }
13347 }
13348 else
13349 {
13350 uint64_t u64EffAddr;
13351
13352 /* Handle the rip+disp32 form with no registers first. */
13353 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13354 {
13355 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13356 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13357 }
13358 else
13359 {
13360 /* Get the register (or SIB) value. */
13361 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13362 {
13363 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13364 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13365 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13366 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13367 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13368 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13369 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13370 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13371 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13372 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13373 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13374 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13375 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13376 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13377 /* SIB */
13378 case 4:
13379 case 12:
13380 {
13381 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13382
13383 /* Get the index and scale it. */
13384 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13385 {
13386 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13387 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13388 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13389 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13390 case 4: u64EffAddr = 0; /*none */ break;
13391 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13392 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13393 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13394 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13395 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13396 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13397 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13398 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13399 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13400 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13401 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13402 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13403 }
13404 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13405
13406 /* add base */
13407 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13408 {
13409 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13410 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13411 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13412 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13413 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13414 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13415 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13416 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13417 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13418 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13419 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13420 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13421 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13422 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13423 /* complicated encodings */
13424 case 5:
13425 case 13:
13426 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13427 {
13428 if (!pVCpu->iem.s.uRexB)
13429 {
13430 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13431 SET_SS_DEF();
13432 }
13433 else
13434 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13435 }
13436 else
13437 {
13438 uint32_t u32Disp;
13439 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13440 u64EffAddr += (int32_t)u32Disp;
13441 }
13442 break;
13443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13444 }
13445 break;
13446 }
13447 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13448 }
13449
13450 /* Get and add the displacement. */
13451 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13452 {
13453 case 0:
13454 break;
13455 case 1:
13456 {
13457 int8_t i8Disp;
13458 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13459 u64EffAddr += i8Disp;
13460 break;
13461 }
13462 case 2:
13463 {
13464 uint32_t u32Disp;
13465 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13466 u64EffAddr += (int32_t)u32Disp;
13467 break;
13468 }
13469 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13470 }
13471
13472 }
13473
13474 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13475 *pGCPtrEff = u64EffAddr;
13476 else
13477 {
13478 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13479 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13480 }
13481 }
13482
13483    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13484 return VINF_SUCCESS;
13485}
13486
13487
13488#ifdef IEM_WITH_SETJMP
13489/**
13490 * Calculates the effective address of a ModR/M memory operand.
13491 *
13492 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13493 *
13494 * May longjmp on internal error.
13495 *
13496 * @return The effective address.
13497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13498 * @param bRm The ModRM byte.
13499 * @param cbImm The size of any immediate following the
13500 * effective address opcode bytes. Important for
13501 * RIP relative addressing.
13502 */
13503IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13504{
13505 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13506# define SET_SS_DEF() \
13507 do \
13508 { \
13509 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13510 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13511 } while (0)
13512
13513 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13514 {
13515/** @todo Check the effective address size crap! */
13516 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13517 {
13518 uint16_t u16EffAddr;
13519
13520 /* Handle the disp16 form with no registers first. */
13521 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13522 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13523 else
13524 {
13525            /* Get the displacement. */
13526 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13527 {
13528 case 0: u16EffAddr = 0; break;
13529 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13530 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13531 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13532 }
13533
13534 /* Add the base and index registers to the disp. */
13535 switch (bRm & X86_MODRM_RM_MASK)
13536 {
13537 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13538 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13539 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13540 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13541 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13542 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13543 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13544 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13545 }
13546 }
13547
13548 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13549 return u16EffAddr;
13550 }
13551
13552 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13553 uint32_t u32EffAddr;
13554
13555 /* Handle the disp32 form with no registers first. */
13556 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13557 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13558 else
13559 {
13560 /* Get the register (or SIB) value. */
13561 switch ((bRm & X86_MODRM_RM_MASK))
13562 {
13563 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13564 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13565 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13566 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13567 case 4: /* SIB */
13568 {
13569 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13570
13571 /* Get the index and scale it. */
13572 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13573 {
13574 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13575 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13576 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13577 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13578 case 4: u32EffAddr = 0; /*none */ break;
13579 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13580 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13581 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13582 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13583 }
13584 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13585
13586 /* add base */
13587 switch (bSib & X86_SIB_BASE_MASK)
13588 {
13589 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13590 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13591 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13592 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13593 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13594 case 5:
13595 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13596 {
13597 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13598 SET_SS_DEF();
13599 }
13600 else
13601 {
13602 uint32_t u32Disp;
13603 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13604 u32EffAddr += u32Disp;
13605 }
13606 break;
13607 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13608 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13609 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13610 }
13611 break;
13612 }
13613 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13614 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13615 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13616 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13617 }
13618
13619 /* Get and add the displacement. */
13620 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13621 {
13622 case 0:
13623 break;
13624 case 1:
13625 {
13626 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13627 u32EffAddr += i8Disp;
13628 break;
13629 }
13630 case 2:
13631 {
13632 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13633 u32EffAddr += u32Disp;
13634 break;
13635 }
13636 default:
13637 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13638 }
13639 }
13640
13641 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13642 {
13643 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13644 return u32EffAddr;
13645 }
13646 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13647 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13648 return u32EffAddr & UINT16_MAX;
13649 }
13650
13651 uint64_t u64EffAddr;
13652
13653 /* Handle the rip+disp32 form with no registers first. */
13654 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13655 {
13656 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13657 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13658 }
13659 else
13660 {
13661 /* Get the register (or SIB) value. */
13662 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13663 {
13664 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13665 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13666 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13667 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13668 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13669 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13670 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13671 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13672 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13673 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13674 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13675 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13676 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13677 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13678 /* SIB */
13679 case 4:
13680 case 12:
13681 {
13682 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13683
13684 /* Get the index and scale it. */
13685 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13686 {
13687 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13688 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13689 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13690 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13691 case 4: u64EffAddr = 0; /*none */ break;
13692 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13693 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13694 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13695 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13696 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13697 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13698 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13699 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13700 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13701 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13702 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13703 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13704 }
13705 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13706
13707 /* add base */
13708 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13709 {
13710 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13711 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13712 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13713 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13714 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13715 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13716 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13717 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13718 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13719 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13720 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13721 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13722 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13723 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13724 /* complicated encodings */
13725 case 5:
13726 case 13:
13727 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13728 {
13729 if (!pVCpu->iem.s.uRexB)
13730 {
13731 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13732 SET_SS_DEF();
13733 }
13734 else
13735 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13736 }
13737 else
13738 {
13739 uint32_t u32Disp;
13740 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13741 u64EffAddr += (int32_t)u32Disp;
13742 }
13743 break;
13744 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13745 }
13746 break;
13747 }
13748 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13749 }
13750
13751 /* Get and add the displacement. */
13752 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13753 {
13754 case 0:
13755 break;
13756 case 1:
13757 {
13758 int8_t i8Disp;
13759 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13760 u64EffAddr += i8Disp;
13761 break;
13762 }
13763 case 2:
13764 {
13765 uint32_t u32Disp;
13766 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13767 u64EffAddr += (int32_t)u32Disp;
13768 break;
13769 }
13770 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13771 }
13772
13773 }
13774
13775 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13776 {
13777 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13778 return u64EffAddr;
13779 }
13780 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13781 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13782 return u64EffAddr & UINT32_MAX;
13783}
13784#endif /* IEM_WITH_SETJMP */
13785
13786/** @} */
13787
13788
13789
13790/*
13791 * Include the instructions
13792 */
13793#include "IEMAllInstructions.cpp.h"
13794
13795
13796
13797#ifdef LOG_ENABLED
13798/**
13799 * Logs the current instruction.
13800 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13801 * @param fSameCtx Set if we have the same context information as the VMM,
13802 * clear if we may have already executed an instruction in
13803 * our debug context. When clear, we assume IEMCPU holds
13804 * valid CPU mode info.
13805 *
13806 * The @a fSameCtx parameter is now misleading and obsolete.
13807 * @param pszFunction The IEM function doing the execution.
13808 */
13809IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13810{
13811# ifdef IN_RING3
13812 if (LogIs2Enabled())
13813 {
13814 char szInstr[256];
13815 uint32_t cbInstr = 0;
13816 if (fSameCtx)
13817 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13818 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13819 szInstr, sizeof(szInstr), &cbInstr);
13820 else
13821 {
13822 uint32_t fFlags = 0;
13823 switch (pVCpu->iem.s.enmCpuMode)
13824 {
13825 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13826 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13827 case IEMMODE_16BIT:
13828 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13829 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13830 else
13831 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13832 break;
13833 }
13834 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13835 szInstr, sizeof(szInstr), &cbInstr);
13836 }
13837
13838 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13839 Log2(("**** %s\n"
13840 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13841 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13842 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13843 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13844 " %s\n"
13845 , pszFunction,
13846 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13847 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13848 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13849 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13850 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13851 szInstr));
13852
13853 if (LogIs3Enabled())
13854 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13855 }
13856 else
13857# endif
13858 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13859 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13860 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13861}
13862#endif /* LOG_ENABLED */
13863
13864
13865/**
13866 * Makes status code adjustments (pass up from I/O and access handler)
13867 * and maintains statistics.
13868 *
13869 * @returns Strict VBox status code to pass up.
13870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13871 * @param rcStrict The status from executing an instruction.
13872 */
13873DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13874{
13875 if (rcStrict != VINF_SUCCESS)
13876 {
13877 if (RT_SUCCESS(rcStrict))
13878 {
13879 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13880 || rcStrict == VINF_IOM_R3_IOPORT_READ
13881 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13882 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13883 || rcStrict == VINF_IOM_R3_MMIO_READ
13884 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13885 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13886 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13887 || rcStrict == VINF_CPUM_R3_MSR_READ
13888 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13889 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13890 || rcStrict == VINF_EM_RAW_TO_R3
13891 || rcStrict == VINF_EM_TRIPLE_FAULT
13892 || rcStrict == VINF_GIM_R3_HYPERCALL
13893 /* raw-mode / virt handlers only: */
13894 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13895 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13896 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13897 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13898 || rcStrict == VINF_SELM_SYNC_GDT
13899 || rcStrict == VINF_CSAM_PENDING_ACTION
13900 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13901 /* nested hw.virt codes: */
13902 || rcStrict == VINF_VMX_VMEXIT
13903 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13904 || rcStrict == VINF_SVM_VMEXIT
13905 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13906/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13907 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13908#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13909 if ( rcStrict == VINF_VMX_VMEXIT
13910 && rcPassUp == VINF_SUCCESS)
13911 rcStrict = VINF_SUCCESS;
13912 else
13913#endif
13914#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13915 if ( rcStrict == VINF_SVM_VMEXIT
13916 && rcPassUp == VINF_SUCCESS)
13917 rcStrict = VINF_SUCCESS;
13918 else
13919#endif
13920 if (rcPassUp == VINF_SUCCESS)
13921 pVCpu->iem.s.cRetInfStatuses++;
13922 else if ( rcPassUp < VINF_EM_FIRST
13923 || rcPassUp > VINF_EM_LAST
13924 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13925 {
13926 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13927 pVCpu->iem.s.cRetPassUpStatus++;
13928 rcStrict = rcPassUp;
13929 }
13930 else
13931 {
13932 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13933 pVCpu->iem.s.cRetInfStatuses++;
13934 }
13935 }
13936 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13937 pVCpu->iem.s.cRetAspectNotImplemented++;
13938 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13939 pVCpu->iem.s.cRetInstrNotImplemented++;
13940 else
13941 pVCpu->iem.s.cRetErrStatuses++;
13942 }
13943 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13944 {
13945 pVCpu->iem.s.cRetPassUpStatus++;
13946 rcStrict = pVCpu->iem.s.rcPassUp;
13947 }
13948
13949 return rcStrict;
13950}
13951
13952
13953/**
13954 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13955 * IEMExecOneWithPrefetchedByPC.
13956 *
13957 * Similar code is found in IEMExecLots.
13958 *
13959 * @return Strict VBox status code.
13960 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13961 * @param fExecuteInhibit If set, execute the instruction following CLI,
13962 * POP SS and MOV SS,GR.
13963 * @param pszFunction The calling function name.
13964 */
13965DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13966{
13967 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13968 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13969 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13970 RT_NOREF_PV(pszFunction);
13971
13972#ifdef IEM_WITH_SETJMP
13973 VBOXSTRICTRC rcStrict;
13974 jmp_buf JmpBuf;
13975 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13976 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13977 if ((rcStrict = setjmp(JmpBuf)) == 0)
13978 {
13979 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13980 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13981 }
13982 else
13983 pVCpu->iem.s.cLongJumps++;
13984 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13985#else
13986 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13987 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13988#endif
13989 if (rcStrict == VINF_SUCCESS)
13990 pVCpu->iem.s.cInstructions++;
13991 if (pVCpu->iem.s.cActiveMappings > 0)
13992 {
13993 Assert(rcStrict != VINF_SUCCESS);
13994 iemMemRollback(pVCpu);
13995 }
13996 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13997 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13998 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13999
14000//#ifdef DEBUG
14001// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14002//#endif
14003
14004#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14005 /*
14006 * Perform any VMX nested-guest instruction boundary actions.
14007 *
14008 * If any of these causes a VM-exit, we must skip executing the next
14009 * instruction (would run into stale page tables). A VM-exit makes sure
14010     * there is no interrupt-inhibition, so that should ensure we don't go on
14011     * to try executing the next instruction.  Clearing fExecuteInhibit is
14012 * problematic because of the setjmp/longjmp clobbering above.
14013 */
14014 if ( rcStrict == VINF_SUCCESS
14015 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14016 {
14017 /* TPR-below threshold/APIC write has the highest priority. */
14018 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14019 {
14020 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14021 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14022 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14023 }
14024 /* MTF takes priority over VMX-preemption timer. */
14025 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14026 {
14027 rcStrict = iemVmxVmexitMtf(pVCpu);
14028 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14029 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14030 }
14031        /* Finally, check if the VMX preemption timer has expired. */
14032 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14033 {
14034 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14035 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14036 rcStrict = VINF_SUCCESS;
14037 else
14038 {
14039 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14040 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14041 }
14042 }
14043 }
14044#endif
14045
14046 /* Execute the next instruction as well if a cli, pop ss or
14047 mov ss, Gr has just completed successfully. */
14048 if ( fExecuteInhibit
14049 && rcStrict == VINF_SUCCESS
14050 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14051 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14052 {
14053 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14054 if (rcStrict == VINF_SUCCESS)
14055 {
14056#ifdef LOG_ENABLED
14057 iemLogCurInstr(pVCpu, false, pszFunction);
14058#endif
14059#ifdef IEM_WITH_SETJMP
14060 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14061 if ((rcStrict = setjmp(JmpBuf)) == 0)
14062 {
14063 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14064 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14065 }
14066 else
14067 pVCpu->iem.s.cLongJumps++;
14068 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14069#else
14070 IEM_OPCODE_GET_NEXT_U8(&b);
14071 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14072#endif
14073 if (rcStrict == VINF_SUCCESS)
14074 pVCpu->iem.s.cInstructions++;
14075 if (pVCpu->iem.s.cActiveMappings > 0)
14076 {
14077 Assert(rcStrict != VINF_SUCCESS);
14078 iemMemRollback(pVCpu);
14079 }
14080 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14081 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14082 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14083 }
14084 else if (pVCpu->iem.s.cActiveMappings > 0)
14085 iemMemRollback(pVCpu);
14086 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14087 }
14088
14089 /*
14090 * Return value fiddling, statistics and sanity assertions.
14091 */
14092 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14093
14094 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14095 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14096 return rcStrict;
14097}
14098
14099
14100#ifdef IN_RC
14101/**
14102 * Re-enters raw-mode or ensures we return to ring-3.
14103 *
14104 * @returns rcStrict, maybe modified.
14105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14106 * @param   rcStrict    The status code returned by the interpreter.
14107 */
14108DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14109{
14110 if ( !pVCpu->iem.s.fInPatchCode
14111 && ( rcStrict == VINF_SUCCESS
14112 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14113 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14114 {
14115 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14116 CPUMRawEnter(pVCpu);
14117 else
14118 {
14119 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14120 rcStrict = VINF_EM_RESCHEDULE;
14121 }
14122 }
14123 return rcStrict;
14124}
14125#endif
14126
14127
14128/**
14129 * Execute one instruction.
14130 *
14131 * @return Strict VBox status code.
14132 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14133 */
14134VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14135{
14136#ifdef LOG_ENABLED
14137 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14138#endif
14139
14140 /*
14141 * Do the decoding and emulation.
14142 */
14143 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14144 if (rcStrict == VINF_SUCCESS)
14145 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14146 else if (pVCpu->iem.s.cActiveMappings > 0)
14147 iemMemRollback(pVCpu);
14148
14149#ifdef IN_RC
14150 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14151#endif
14152 if (rcStrict != VINF_SUCCESS)
14153 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14154 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14155 return rcStrict;
14156}
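
/*
 * Minimal usage sketch (the function name and placement are made up; real
 * callers sit in EM and the hardware-assisted execution exit handlers):
 */
#if 0
static VBOXSTRICTRC emR3SampleInterpretOne(PVMCPU pVCpu)
{
    /* Interpret exactly one guest instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    /* Informational statuses (e.g. ring-3 I/O deferrals) must be handed on to the caller. */
    return rcStrict;
}
#endif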
14157
14158
14159VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14160{
14161 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14162
14163 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14164 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14165 if (rcStrict == VINF_SUCCESS)
14166 {
14167 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14168 if (pcbWritten)
14169 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14170 }
14171 else if (pVCpu->iem.s.cActiveMappings > 0)
14172 iemMemRollback(pVCpu);
14173
14174#ifdef IN_RC
14175 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14176#endif
14177 return rcStrict;
14178}
14179
14180
14181VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14182 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14183{
14184 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14185
14186 VBOXSTRICTRC rcStrict;
14187 if ( cbOpcodeBytes
14188 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14189 {
14190 iemInitDecoder(pVCpu, false);
14191#ifdef IEM_WITH_CODE_TLB
14192 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14193 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14194 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14195 pVCpu->iem.s.offCurInstrStart = 0;
14196 pVCpu->iem.s.offInstrNextByte = 0;
14197#else
14198 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14199 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14200#endif
14201 rcStrict = VINF_SUCCESS;
14202 }
14203 else
14204 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14205 if (rcStrict == VINF_SUCCESS)
14206 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14207 else if (pVCpu->iem.s.cActiveMappings > 0)
14208 iemMemRollback(pVCpu);
14209
14210#ifdef IN_RC
14211 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14212#endif
14213 return rcStrict;
14214}
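
/*
 * Minimal usage sketch (hypothetical helper; the opcode buffer would typically
 * come from exit information supplied by the hardware-assisted execution code):
 */
#if 0
static VBOXSTRICTRC iemSampleExecPrefetched(PVMCPU pVCpu, uint8_t const *pabInstr, size_t cbInstr)
{
    /* The prefetched bytes are only used when they were fetched at the current RIP;
       otherwise the call falls back to the normal opcode prefetch path. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, pabInstr, cbInstr);
}
#endif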
14215
14216
14217VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14218{
14219 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14220
14221 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14222 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14223 if (rcStrict == VINF_SUCCESS)
14224 {
14225 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14226 if (pcbWritten)
14227 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14228 }
14229 else if (pVCpu->iem.s.cActiveMappings > 0)
14230 iemMemRollback(pVCpu);
14231
14232#ifdef IN_RC
14233 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14234#endif
14235 return rcStrict;
14236}
14237
14238
14239VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14240 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14241{
14242 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14243
14244 VBOXSTRICTRC rcStrict;
14245 if ( cbOpcodeBytes
14246 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14247 {
14248 iemInitDecoder(pVCpu, true);
14249#ifdef IEM_WITH_CODE_TLB
14250 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14251 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14252 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14253 pVCpu->iem.s.offCurInstrStart = 0;
14254 pVCpu->iem.s.offInstrNextByte = 0;
14255#else
14256 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14257 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14258#endif
14259 rcStrict = VINF_SUCCESS;
14260 }
14261 else
14262 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14263 if (rcStrict == VINF_SUCCESS)
14264 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14265 else if (pVCpu->iem.s.cActiveMappings > 0)
14266 iemMemRollback(pVCpu);
14267
14268#ifdef IN_RC
14269 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14270#endif
14271 return rcStrict;
14272}
14273
14274
14275/**
14276 * May come in handy for debugging DISGetParamSize.
14277 *
14278 * @returns Strict VBox status code.
14279 * @param pVCpu The cross context virtual CPU structure of the
14280 * calling EMT.
14281 * @param pCtxCore The context core structure.
14282 * @param OpcodeBytesPC The PC of the opcode bytes.
14283 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14284 * @param cbOpcodeBytes Number of prefetched bytes.
14285 * @param pcbWritten Where to return the number of bytes written.
14286 * Optional.
14287 */
14288VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14289 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14290 uint32_t *pcbWritten)
14291{
14292 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14293
14294 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14295 VBOXSTRICTRC rcStrict;
14296 if ( cbOpcodeBytes
14297 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14298 {
14299 iemInitDecoder(pVCpu, true);
14300#ifdef IEM_WITH_CODE_TLB
14301 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14302 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14303 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14304 pVCpu->iem.s.offCurInstrStart = 0;
14305 pVCpu->iem.s.offInstrNextByte = 0;
14306#else
14307 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14308 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14309#endif
14310 rcStrict = VINF_SUCCESS;
14311 }
14312 else
14313 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14314 if (rcStrict == VINF_SUCCESS)
14315 {
14316 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14317 if (pcbWritten)
14318 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14319 }
14320 else if (pVCpu->iem.s.cActiveMappings > 0)
14321 iemMemRollback(pVCpu);
14322
14323#ifdef IN_RC
14324 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14325#endif
14326 return rcStrict;
14327}
14328
14329
14330VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14331{
14332 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14333
14334 /*
14335     * See if there is an interrupt pending in TRPM and inject it if we can.
14336 */
14337 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14338#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14339 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14340 if (fIntrEnabled)
14341 {
14342 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14343 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14344 else
14345 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14346 }
14347#else
14348 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14349#endif
14350 if ( fIntrEnabled
14351 && TRPMHasTrap(pVCpu)
14352 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14353 {
14354 uint8_t u8TrapNo;
14355 TRPMEVENT enmType;
14356 RTGCUINT uErrCode;
14357 RTGCPTR uCr2;
14358 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14359 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14360 TRPMResetTrap(pVCpu);
14361 }
14362
14363 /*
14364 * Initial decoder init w/ prefetch, then setup setjmp.
14365 */
14366 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14367 if (rcStrict == VINF_SUCCESS)
14368 {
14369#ifdef IEM_WITH_SETJMP
14370 jmp_buf JmpBuf;
14371 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14372 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14373 pVCpu->iem.s.cActiveMappings = 0;
14374 if ((rcStrict = setjmp(JmpBuf)) == 0)
14375#endif
14376 {
14377 /*
14378 * The run loop. We limit ourselves to 4096 instructions right now.
14379 */
14380 PVM pVM = pVCpu->CTX_SUFF(pVM);
14381 uint32_t cInstr = 4096;
14382 for (;;)
14383 {
14384 /*
14385 * Log the state.
14386 */
14387#ifdef LOG_ENABLED
14388 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14389#endif
14390
14391 /*
14392 * Do the decoding and emulation.
14393 */
14394 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14395 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14396 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14397 {
14398 Assert(pVCpu->iem.s.cActiveMappings == 0);
14399 pVCpu->iem.s.cInstructions++;
14400 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14401 {
14402 uint64_t fCpu = pVCpu->fLocalForcedActions
14403 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14404 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14405 | VMCPU_FF_TLB_FLUSH
14406#ifdef VBOX_WITH_RAW_MODE
14407 | VMCPU_FF_TRPM_SYNC_IDT
14408 | VMCPU_FF_SELM_SYNC_TSS
14409 | VMCPU_FF_SELM_SYNC_GDT
14410 | VMCPU_FF_SELM_SYNC_LDT
14411#endif
14412 | VMCPU_FF_INHIBIT_INTERRUPTS
14413 | VMCPU_FF_BLOCK_NMIS
14414 | VMCPU_FF_UNHALT ));
14415
14416 if (RT_LIKELY( ( !fCpu
14417 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14418 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14419 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14420 {
14421 if (cInstr-- > 0)
14422 {
14423 Assert(pVCpu->iem.s.cActiveMappings == 0);
14424 iemReInitDecoder(pVCpu);
14425 continue;
14426 }
14427 }
14428 }
14429 Assert(pVCpu->iem.s.cActiveMappings == 0);
14430 }
14431 else if (pVCpu->iem.s.cActiveMappings > 0)
14432 iemMemRollback(pVCpu);
14433 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14434 break;
14435 }
14436 }
14437#ifdef IEM_WITH_SETJMP
14438 else
14439 {
14440 if (pVCpu->iem.s.cActiveMappings > 0)
14441 iemMemRollback(pVCpu);
14442 pVCpu->iem.s.cLongJumps++;
14443 }
14444 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14445#endif
14446
14447 /*
14448 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14449 */
14450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14452 }
14453 else
14454 {
14455 if (pVCpu->iem.s.cActiveMappings > 0)
14456 iemMemRollback(pVCpu);
14457
14458#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14459 /*
14460         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14461         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14462 */
14463 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14464#endif
14465 }
14466
14467 /*
14468 * Maybe re-enter raw-mode and log.
14469 */
14470#ifdef IN_RC
14471 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14472#endif
14473 if (rcStrict != VINF_SUCCESS)
14474 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14475 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14476 if (pcInstructions)
14477 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14478 return rcStrict;
14479}
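
/*
 * Minimal usage sketch (hypothetical caller name; pcInstructions may be NULL
 * when the caller is not interested in the count):
 */
#if 0
static VBOXSTRICTRC emSampleExecuteLots(PVMCPU pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    Log2(("emSampleExecuteLots: executed %u instructions, rcStrict=%Rrc\n",
          cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif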
14480
14481
14482/**
14483 * Interface used by EMExecuteExec, does exit statistics and limits.
14484 *
14485 * @returns Strict VBox status code.
14486 * @param pVCpu The cross context virtual CPU structure.
14487 * @param fWillExit To be defined.
14488 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14489 * @param cMaxInstructions Maximum number of instructions to execute.
14490 * @param cMaxInstructionsWithoutExits
14491 * The max number of instructions without exits.
14492 * @param pStats Where to return statistics.
14493 */
14494VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14495 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14496{
14497 NOREF(fWillExit); /** @todo define flexible exit crits */
14498
14499 /*
14500 * Initialize return stats.
14501 */
14502 pStats->cInstructions = 0;
14503 pStats->cExits = 0;
14504 pStats->cMaxExitDistance = 0;
14505 pStats->cReserved = 0;
14506
14507 /*
14508 * Initial decoder init w/ prefetch, then setup setjmp.
14509 */
14510 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14511 if (rcStrict == VINF_SUCCESS)
14512 {
14513#ifdef IEM_WITH_SETJMP
14514 jmp_buf JmpBuf;
14515 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14516 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14517 pVCpu->iem.s.cActiveMappings = 0;
14518 if ((rcStrict = setjmp(JmpBuf)) == 0)
14519#endif
14520 {
14521#ifdef IN_RING0
14522 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14523#endif
14524 uint32_t cInstructionSinceLastExit = 0;
14525
14526 /*
14527             * The run loop.  The instruction count limits are supplied by the caller.
14528 */
14529 PVM pVM = pVCpu->CTX_SUFF(pVM);
14530 for (;;)
14531 {
14532 /*
14533 * Log the state.
14534 */
14535#ifdef LOG_ENABLED
14536 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14537#endif
14538
14539 /*
14540 * Do the decoding and emulation.
14541 */
14542 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14543
14544 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14545 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14546
14547 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14548 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14549 {
14550 pStats->cExits += 1;
14551 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14552 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14553 cInstructionSinceLastExit = 0;
14554 }
14555
14556 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14557 {
14558 Assert(pVCpu->iem.s.cActiveMappings == 0);
14559 pVCpu->iem.s.cInstructions++;
14560 pStats->cInstructions++;
14561 cInstructionSinceLastExit++;
14562 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14563 {
14564 uint64_t fCpu = pVCpu->fLocalForcedActions
14565 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14566 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14567 | VMCPU_FF_TLB_FLUSH
14568#ifdef VBOX_WITH_RAW_MODE
14569 | VMCPU_FF_TRPM_SYNC_IDT
14570 | VMCPU_FF_SELM_SYNC_TSS
14571 | VMCPU_FF_SELM_SYNC_GDT
14572 | VMCPU_FF_SELM_SYNC_LDT
14573#endif
14574 | VMCPU_FF_INHIBIT_INTERRUPTS
14575 | VMCPU_FF_BLOCK_NMIS
14576 | VMCPU_FF_UNHALT ));
14577
14578 if (RT_LIKELY( ( ( !fCpu
14579 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14580 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14581 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14582 || pStats->cInstructions < cMinInstructions))
14583 {
14584 if (pStats->cInstructions < cMaxInstructions)
14585 {
14586 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14587 {
14588#ifdef IN_RING0
14589 if ( !fCheckPreemptionPending
14590 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14591#endif
14592 {
14593 Assert(pVCpu->iem.s.cActiveMappings == 0);
14594 iemReInitDecoder(pVCpu);
14595 continue;
14596 }
14597#ifdef IN_RING0
14598 rcStrict = VINF_EM_RAW_INTERRUPT;
14599 break;
14600#endif
14601 }
14602 }
14603 }
14604 Assert(!(fCpu & VMCPU_FF_IEM));
14605 }
14606 Assert(pVCpu->iem.s.cActiveMappings == 0);
14607 }
14608 else if (pVCpu->iem.s.cActiveMappings > 0)
14609 iemMemRollback(pVCpu);
14610 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14611 break;
14612 }
14613 }
14614#ifdef IEM_WITH_SETJMP
14615 else
14616 {
14617 if (pVCpu->iem.s.cActiveMappings > 0)
14618 iemMemRollback(pVCpu);
14619 pVCpu->iem.s.cLongJumps++;
14620 }
14621 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14622#endif
14623
14624 /*
14625 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14626 */
14627 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14628 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14629 }
14630 else
14631 {
14632 if (pVCpu->iem.s.cActiveMappings > 0)
14633 iemMemRollback(pVCpu);
14634
14635#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14636 /*
14637         * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14638         * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14639 */
14640 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14641#endif
14642 }
14643
14644 /*
14645 * Maybe re-enter raw-mode and log.
14646 */
14647#ifdef IN_RC
14648 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14649#endif
14650 if (rcStrict != VINF_SUCCESS)
14651 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14652 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14653 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14654 return rcStrict;
14655}
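
/*
 * Minimal usage sketch (hypothetical caller; the instruction limits are
 * made-up tuning values, not recommendations):
 */
#if 0
static VBOXSTRICTRC nemSampleExecForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not yet defined*/,
                                            1 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log2(("nemSampleExecForExits: %u instructions, %u exits, max exit distance %u, rcStrict=%Rrc\n",
          Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif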
14656
14657
14658/**
14659 * Injects a trap, fault, abort, software interrupt or external interrupt.
14660 *
14661 * The parameter list matches TRPMQueryTrapAll pretty closely.
14662 *
14663 * @returns Strict VBox status code.
14664 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14665 * @param u8TrapNo The trap number.
14666 * @param enmType What type is it (trap/fault/abort), software
14667 * interrupt or hardware interrupt.
14668 * @param uErrCode The error code if applicable.
14669 * @param uCr2 The CR2 value if applicable.
14670 * @param cbInstr The instruction length (only relevant for
14671 * software interrupts).
14672 */
14673VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14674 uint8_t cbInstr)
14675{
14676 iemInitDecoder(pVCpu, false);
14677#ifdef DBGFTRACE_ENABLED
14678 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14679 u8TrapNo, enmType, uErrCode, uCr2);
14680#endif
14681
14682 uint32_t fFlags;
14683 switch (enmType)
14684 {
14685 case TRPM_HARDWARE_INT:
14686 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14687 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14688 uErrCode = uCr2 = 0;
14689 break;
14690
14691 case TRPM_SOFTWARE_INT:
14692 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14693 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14694 uErrCode = uCr2 = 0;
14695 break;
14696
14697 case TRPM_TRAP:
14698 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14699 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14700 if (u8TrapNo == X86_XCPT_PF)
14701 fFlags |= IEM_XCPT_FLAGS_CR2;
14702 switch (u8TrapNo)
14703 {
14704 case X86_XCPT_DF:
14705 case X86_XCPT_TS:
14706 case X86_XCPT_NP:
14707 case X86_XCPT_SS:
14708 case X86_XCPT_PF:
14709 case X86_XCPT_AC:
14710 fFlags |= IEM_XCPT_FLAGS_ERR;
14711 break;
14712
14713 case X86_XCPT_NMI:
14714 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14715 break;
14716 }
14717 break;
14718
14719 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14720 }
14721
14722 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14723
14724 if (pVCpu->iem.s.cActiveMappings > 0)
14725 iemMemRollback(pVCpu);
14726
14727 return rcStrict;
14728}
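
/*
 * Illustrative sketch (not part of the original file): how a caller could hand
 * a guest page fault to IEM via IEMInjectTrap.  The wrapper name and the way
 * the fault details reach it are hypothetical; only the IEMInjectTrap call
 * itself reflects the interface above.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* #PF is a CPU exception (TRPM_TRAP); CR2 carries the faulting address and
       cbInstr is irrelevant for hardware exceptions, so 0 is passed. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif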
14729
14730
14731/**
14732 * Injects the active TRPM event.
14733 *
14734 * @returns Strict VBox status code.
14735 * @param pVCpu The cross context virtual CPU structure.
14736 */
14737VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14738{
14739#ifndef IEM_IMPLEMENTS_TASKSWITCH
14740 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14741#else
14742 uint8_t u8TrapNo;
14743 TRPMEVENT enmType;
14744 RTGCUINT uErrCode;
14745 RTGCUINTPTR uCr2;
14746 uint8_t cbInstr;
14747 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14748 if (RT_FAILURE(rc))
14749 return rc;
14750
14751 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14752# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14753 if (rcStrict == VINF_SVM_VMEXIT)
14754 rcStrict = VINF_SUCCESS;
14755# endif
14756
14757 /** @todo Are there any other codes that imply the event was successfully
14758 * delivered to the guest? See @bugref{6607}. */
14759 if ( rcStrict == VINF_SUCCESS
14760 || rcStrict == VINF_IEM_RAISED_XCPT)
14761 TRPMResetTrap(pVCpu);
14762
14763 return rcStrict;
14764#endif
14765}
14766
14767
14768VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14769{
14770 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14771 return VERR_NOT_IMPLEMENTED;
14772}
14773
14774
14775VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14776{
14777 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14778 return VERR_NOT_IMPLEMENTED;
14779}
14780
14781
14782#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14783/**
14784 * Executes an IRET instruction with default operand size.
14785 *
14786 * This is for PATM.
14787 *
14788 * @returns VBox status code.
14789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14790 * @param pCtxCore The register frame.
14791 */
14792VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14793{
14794 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14795
14796 iemCtxCoreToCtx(pCtx, pCtxCore);
14797 iemInitDecoder(pVCpu);
14798 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14799 if (rcStrict == VINF_SUCCESS)
14800 iemCtxToCtxCore(pCtxCore, pCtx);
14801 else
14802 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14803                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14804 return rcStrict;
14805}
14806#endif
14807
14808
14809/**
14810 * Macro used by the IEMExec* method to check the given instruction length.
14811 *
14812 * Will return on failure!
14813 *
14814 * @param a_cbInstr The given instruction length.
14815 * @param a_cbMin The minimum length.
14816 */
14817#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14818 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14819 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
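
/*
 * Added note (editorial): the single unsigned comparison above checks both
 * bounds at once, i.e. a_cbMin <= a_cbInstr <= 15.  E.g. with a_cbMin=2 the
 * threshold is 13: a_cbInstr=1 wraps to 0xffffffff (> 13, rejected),
 * a_cbInstr=16 gives 14 (> 13, rejected), while 2..15 map to 0..13 (accepted).
 */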
14820
14821
14822/**
14823 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14824 *
14825 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14826 *
14827 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14829 * @param rcStrict The status code to fiddle.
14830 */
14831DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14832{
14833 iemUninitExec(pVCpu);
14834#ifdef IN_RC
14835 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14836#else
14837 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14838#endif
14839}
14840
14841
14842/**
14843 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14844 *
14845 * This API ASSUMES that the caller has already verified that the guest code is
14846 * allowed to access the I/O port. (The I/O port is in the DX register in the
14847 * guest state.)
14848 *
14849 * @returns Strict VBox status code.
14850 * @param pVCpu The cross context virtual CPU structure.
14851 * @param cbValue The size of the I/O port access (1, 2, or 4).
14852 * @param enmAddrMode The addressing mode.
14853 * @param fRepPrefix Indicates whether a repeat prefix is used
14854 * (doesn't matter which for this instruction).
14855 * @param cbInstr The instruction length in bytes.
14856 * @param   iEffSeg     The effective segment register number.
14857 * @param fIoChecked Whether the access to the I/O port has been
14858 * checked or not. It's typically checked in the
14859 * HM scenario.
14860 */
14861VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14862 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14863{
14864 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14865 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14866
14867 /*
14868 * State init.
14869 */
14870 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14871
14872 /*
14873 * Switch orgy for getting to the right handler.
14874 */
14875 VBOXSTRICTRC rcStrict;
14876 if (fRepPrefix)
14877 {
14878 switch (enmAddrMode)
14879 {
14880 case IEMMODE_16BIT:
14881 switch (cbValue)
14882 {
14883 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14884 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 default:
14887 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14888 }
14889 break;
14890
14891 case IEMMODE_32BIT:
14892 switch (cbValue)
14893 {
14894 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14895 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14896 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14897 default:
14898 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14899 }
14900 break;
14901
14902 case IEMMODE_64BIT:
14903 switch (cbValue)
14904 {
14905 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14906 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14907 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14908 default:
14909 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14910 }
14911 break;
14912
14913 default:
14914 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14915 }
14916 }
14917 else
14918 {
14919 switch (enmAddrMode)
14920 {
14921 case IEMMODE_16BIT:
14922 switch (cbValue)
14923 {
14924 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14925 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14926 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14927 default:
14928 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14929 }
14930 break;
14931
14932 case IEMMODE_32BIT:
14933 switch (cbValue)
14934 {
14935 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14936 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14937 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14938 default:
14939 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14940 }
14941 break;
14942
14943 case IEMMODE_64BIT:
14944 switch (cbValue)
14945 {
14946 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14947 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14948 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14949 default:
14950 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14951 }
14952 break;
14953
14954 default:
14955 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14956 }
14957 }
14958
14959 if (pVCpu->iem.s.cActiveMappings)
14960 iemMemRollback(pVCpu);
14961
14962 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14963}
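
/*
 * Illustrative sketch (not part of the original file): how an HM exit handler
 * might forward a "REP OUTSB" exit to the interface above.  The wrapper name
 * and the hard-coded operand/address sizes are assumptions for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized accesses, 64-bit address size, REP prefix present, DS as the
       effective segment, and the I/O permission already checked by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_64BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif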
14964
14965
14966/**
14967 * Interface for HM and EM for executing string I/O IN (read) instructions.
14968 *
14969 * This API ASSUMES that the caller has already verified that the guest code is
14970 * allowed to access the I/O port. (The I/O port is in the DX register in the
14971 * guest state.)
14972 *
14973 * @returns Strict VBox status code.
14974 * @param pVCpu The cross context virtual CPU structure.
14975 * @param cbValue The size of the I/O port access (1, 2, or 4).
14976 * @param enmAddrMode The addressing mode.
14977 * @param fRepPrefix Indicates whether a repeat prefix is used
14978 * (doesn't matter which for this instruction).
14979 * @param cbInstr The instruction length in bytes.
14980 * @param fIoChecked Whether the access to the I/O port has been
14981 * checked or not. It's typically checked in the
14982 * HM scenario.
14983 */
14984VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14985 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14986{
14987 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14988
14989 /*
14990 * State init.
14991 */
14992 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14993
14994 /*
14995 * Switch orgy for getting to the right handler.
14996 */
14997 VBOXSTRICTRC rcStrict;
14998 if (fRepPrefix)
14999 {
15000 switch (enmAddrMode)
15001 {
15002 case IEMMODE_16BIT:
15003 switch (cbValue)
15004 {
15005 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15006 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15007 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15008 default:
15009 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15010 }
15011 break;
15012
15013 case IEMMODE_32BIT:
15014 switch (cbValue)
15015 {
15016 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15017 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15018 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15019 default:
15020 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15021 }
15022 break;
15023
15024 case IEMMODE_64BIT:
15025 switch (cbValue)
15026 {
15027 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15028 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15029 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15030 default:
15031 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15032 }
15033 break;
15034
15035 default:
15036 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15037 }
15038 }
15039 else
15040 {
15041 switch (enmAddrMode)
15042 {
15043 case IEMMODE_16BIT:
15044 switch (cbValue)
15045 {
15046 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15047 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15048 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15049 default:
15050 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15051 }
15052 break;
15053
15054 case IEMMODE_32BIT:
15055 switch (cbValue)
15056 {
15057 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15058 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15059 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15060 default:
15061 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15062 }
15063 break;
15064
15065 case IEMMODE_64BIT:
15066 switch (cbValue)
15067 {
15068 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15069 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15070 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15071 default:
15072 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15073 }
15074 break;
15075
15076 default:
15077 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15078 }
15079 }
15080
15081 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15083}
15084
15085
15086/**
15087 * Interface for rawmode to execute an OUT (write) instruction.
15088 *
15089 * @returns Strict VBox status code.
15090 * @param pVCpu The cross context virtual CPU structure.
15091 * @param cbInstr The instruction length in bytes.
15092 * @param   u16Port     The port to write to.
15093 * @param fImm Whether the port is specified using an immediate operand or
15094 * using the implicit DX register.
15095 * @param cbReg The register size.
15096 *
15097 * @remarks In ring-0 not all of the state needs to be synced in.
15098 */
15099VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15100{
15101 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15102 Assert(cbReg <= 4 && cbReg != 3);
15103
15104 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15105 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15106 Assert(!pVCpu->iem.s.cActiveMappings);
15107 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15108}
15109
15110
15111/**
15112 * Interface for rawmode to execute an IN (read) instruction.
15113 *
15114 * @returns Strict VBox status code.
15115 * @param pVCpu The cross context virtual CPU structure.
15116 * @param cbInstr The instruction length in bytes.
15117 * @param u16Port The port to read.
15118 * @param fImm Whether the port is specified using an immediate operand or
15119 * using the implicit DX.
15120 * @param cbReg The register size.
15121 */
15122VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15123{
15124 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15125 Assert(cbReg <= 4 && cbReg != 3);
15126
15127 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15129 Assert(!pVCpu->iem.s.cActiveMappings);
15130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15131}
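
/*
 * Illustrative sketch (not part of the original file): emulating a decoded
 * one-byte "IN AL, DX" (opcode 0xEC).  The wrapper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleDecodedInAlDx(PVMCPU pVCpu, uint16_t u16Port)
{
    /* cbInstr=1 for the single-byte encoding, port taken from DX (fImm=false),
       one byte register width (AL). */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif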
15132
15133
15134/**
15135 * Interface for HM and EM to write to a CRx register.
15136 *
15137 * @returns Strict VBox status code.
15138 * @param pVCpu The cross context virtual CPU structure.
15139 * @param cbInstr The instruction length in bytes.
15140 * @param iCrReg The control register number (destination).
15141 * @param iGReg The general purpose register number (source).
15142 *
15143 * @remarks In ring-0 not all of the state needs to be synced in.
15144 */
15145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15146{
15147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15148 Assert(iCrReg < 16);
15149 Assert(iGReg < 16);
15150
15151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15152 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15153 Assert(!pVCpu->iem.s.cActiveMappings);
15154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15155}
15156
15157
15158/**
15159 * Interface for HM and EM to read from a CRx register.
15160 *
15161 * @returns Strict VBox status code.
15162 * @param pVCpu The cross context virtual CPU structure.
15163 * @param cbInstr The instruction length in bytes.
15164 * @param iGReg The general purpose register number (destination).
15165 * @param iCrReg The control register number (source).
15166 *
15167 * @remarks In ring-0 not all of the state needs to be synced in.
15168 */
15169VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15170{
15171 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15172 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15173 | CPUMCTX_EXTRN_APIC_TPR);
15174 Assert(iCrReg < 16);
15175 Assert(iGReg < 16);
15176
15177 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15178 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15179 Assert(!pVCpu->iem.s.cActiveMappings);
15180 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15181}
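
/*
 * Illustrative sketch (not part of the original file): emulating "mov cr3, rax"
 * (encoding 0F 22 D8, 3 bytes) after an intercept.  The wrapper name is
 * hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleMovCr3FromRax(PVMCPU pVCpu)
{
    /* iCrReg=3 is the destination control register, iGReg=0 is RAX. */
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif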
15182
15183
15184/**
15185 * Interface for HM and EM to clear the CR0[TS] bit.
15186 *
15187 * @returns Strict VBox status code.
15188 * @param pVCpu The cross context virtual CPU structure.
15189 * @param cbInstr The instruction length in bytes.
15190 *
15191 * @remarks In ring-0 not all of the state needs to be synced in.
15192 */
15193VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15194{
15195 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15196
15197 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15198 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15199 Assert(!pVCpu->iem.s.cActiveMappings);
15200 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15201}
15202
15203
15204/**
15205 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15206 *
15207 * @returns Strict VBox status code.
15208 * @param pVCpu The cross context virtual CPU structure.
15209 * @param cbInstr The instruction length in bytes.
15210 * @param uValue The value to load into CR0.
15211 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15212 * memory operand. Otherwise pass NIL_RTGCPTR.
15213 *
15214 * @remarks In ring-0 not all of the state needs to be synced in.
15215 */
15216VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15217{
15218 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15219
15220 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15221 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15222 Assert(!pVCpu->iem.s.cActiveMappings);
15223 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15224}
15225
15226
15227/**
15228 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15229 *
15230 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15231 *
15232 * @returns Strict VBox status code.
15233 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15234 * @param cbInstr The instruction length in bytes.
15235 * @remarks In ring-0 not all of the state needs to be synced in.
15236 * @thread EMT(pVCpu)
15237 */
15238VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15239{
15240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15241
15242 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15244 Assert(!pVCpu->iem.s.cActiveMappings);
15245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15246}
15247
15248
15249/**
15250 * Interface for HM and EM to emulate the WBINVD instruction.
15251 *
15252 * @returns Strict VBox status code.
15253 * @param pVCpu The cross context virtual CPU structure.
15254 * @param cbInstr The instruction length in bytes.
15255 *
15256 * @remarks In ring-0 not all of the state needs to be synced in.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261
15262 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15263 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15264 Assert(!pVCpu->iem.s.cActiveMappings);
15265 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15266}
15267
15268
15269/**
15270 * Interface for HM and EM to emulate the INVD instruction.
15271 *
15272 * @returns Strict VBox status code.
15273 * @param pVCpu The cross context virtual CPU structure.
15274 * @param cbInstr The instruction length in bytes.
15275 *
15276 * @remarks In ring-0 not all of the state needs to be synced in.
15277 */
15278VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15279{
15280 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15281
15282 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15283 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15284 Assert(!pVCpu->iem.s.cActiveMappings);
15285 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15286}
15287
15288
15289/**
15290 * Interface for HM and EM to emulate the INVLPG instruction.
15291 *
15292 * @returns Strict VBox status code.
15293 * @retval VINF_PGM_SYNC_CR3
15294 *
15295 * @param pVCpu The cross context virtual CPU structure.
15296 * @param cbInstr The instruction length in bytes.
15297 * @param GCPtrPage The effective address of the page to invalidate.
15298 *
15299 * @remarks In ring-0 not all of the state needs to be synced in.
15300 */
15301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15302{
15303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15304
15305 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15306 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15307 Assert(!pVCpu->iem.s.cActiveMappings);
15308 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15309}
15310
15311
15312/**
15313 * Interface for HM and EM to emulate the CPUID instruction.
15314 *
15315 * @returns Strict VBox status code.
15316 *
15317 * @param pVCpu The cross context virtual CPU structure.
15318 * @param cbInstr The instruction length in bytes.
15319 *
15320 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15321 */
15322VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15323{
15324 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15325 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15326
15327 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15328 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15329 Assert(!pVCpu->iem.s.cActiveMappings);
15330 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15331}
15332
15333
15334/**
15335 * Interface for HM and EM to emulate the RDPMC instruction.
15336 *
15337 * @returns Strict VBox status code.
15338 *
15339 * @param pVCpu The cross context virtual CPU structure.
15340 * @param cbInstr The instruction length in bytes.
15341 *
15342 * @remarks Not all of the state needs to be synced in.
15343 */
15344VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15345{
15346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15347 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15348
15349 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15350 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15351 Assert(!pVCpu->iem.s.cActiveMappings);
15352 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15353}
15354
15355
15356/**
15357 * Interface for HM and EM to emulate the RDTSC instruction.
15358 *
15359 * @returns Strict VBox status code.
15360 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15361 *
15362 * @param pVCpu The cross context virtual CPU structure.
15363 * @param cbInstr The instruction length in bytes.
15364 *
15365 * @remarks Not all of the state needs to be synced in.
15366 */
15367VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15368{
15369 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15370 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15371
15372 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15373 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15374 Assert(!pVCpu->iem.s.cActiveMappings);
15375 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15376}
15377
15378
15379/**
15380 * Interface for HM and EM to emulate the RDTSCP instruction.
15381 *
15382 * @returns Strict VBox status code.
15383 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15384 *
15385 * @param pVCpu The cross context virtual CPU structure.
15386 * @param cbInstr The instruction length in bytes.
15387 *
15388 * @remarks Not all of the state needs to be synced in. Recommended
15389 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15390 */
15391VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15392{
15393 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15394 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15395
15396 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15397 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15398 Assert(!pVCpu->iem.s.cActiveMappings);
15399 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15400}
15401
15402
15403/**
15404 * Interface for HM and EM to emulate the RDMSR instruction.
15405 *
15406 * @returns Strict VBox status code.
15407 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15408 *
15409 * @param pVCpu The cross context virtual CPU structure.
15410 * @param cbInstr The instruction length in bytes.
15411 *
15412 * @remarks Not all of the state needs to be synced in. Requires RCX and
15413 * (currently) all MSRs.
15414 */
15415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15416{
15417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15418 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15419
15420 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15421 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15422 Assert(!pVCpu->iem.s.cActiveMappings);
15423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15424}
15425
15426
15427/**
15428 * Interface for HM and EM to emulate the WRMSR instruction.
15429 *
15430 * @returns Strict VBox status code.
15431 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15432 *
15433 * @param pVCpu The cross context virtual CPU structure.
15434 * @param cbInstr The instruction length in bytes.
15435 *
15436 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15437 * and (currently) all MSRs.
15438 */
15439VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15440{
15441 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15442 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15443 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15444
15445 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15446 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15447 Assert(!pVCpu->iem.s.cActiveMappings);
15448 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15449}
15450
15451
15452/**
15453 * Interface for HM and EM to emulate the MONITOR instruction.
15454 *
15455 * @returns Strict VBox status code.
15456 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15457 *
15458 * @param pVCpu The cross context virtual CPU structure.
15459 * @param cbInstr The instruction length in bytes.
15460 *
15461 * @remarks Not all of the state needs to be synced in.
15462 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15463 * are used.
15464 */
15465VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15466{
15467 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15468 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15469
15470 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15471 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15472 Assert(!pVCpu->iem.s.cActiveMappings);
15473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15474}
15475
15476
15477/**
15478 * Interface for HM and EM to emulate the MWAIT instruction.
15479 *
15480 * @returns Strict VBox status code.
15481 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15482 *
15483 * @param pVCpu The cross context virtual CPU structure.
15484 * @param cbInstr The instruction length in bytes.
15485 *
15486 * @remarks Not all of the state needs to be synced in.
15487 */
15488VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15489{
15490 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15491
15492 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15493 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15494 Assert(!pVCpu->iem.s.cActiveMappings);
15495 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15496}
15497
15498
15499/**
15500 * Interface for HM and EM to emulate the HLT instruction.
15501 *
15502 * @returns Strict VBox status code.
15503 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15504 *
15505 * @param pVCpu The cross context virtual CPU structure.
15506 * @param cbInstr The instruction length in bytes.
15507 *
15508 * @remarks Not all of the state needs to be synced in.
15509 */
15510VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15511{
15512 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15513
15514 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15515 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15516 Assert(!pVCpu->iem.s.cActiveMappings);
15517 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15518}
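
/*
 * Illustrative sketch (not part of the original file): the typical calling
 * pattern for the simple IEMExecDecodedXxx helpers, here emulating HLT (single
 * byte 0xF4) after an intercept.  The wrapper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateHlt(PVMCPU pVCpu)
{
    /* The resulting status (typically VINF_EM_HALT) is handed back to EM for
       scheduling decisions. */
    return IEMExecDecodedHlt(pVCpu, 1 /*cbInstr*/);
}
#endif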
15519
15520
15521/**
15522 * Checks if IEM is in the process of delivering an event (interrupt or
15523 * exception).
15524 *
15525 * @returns true if we're in the process of raising an interrupt or exception,
15526 * false otherwise.
15527 * @param pVCpu The cross context virtual CPU structure.
15528 * @param puVector Where to store the vector associated with the
15529 * currently delivered event, optional.
15530 * @param   pfFlags     Where to store the event delivery flags (see
15531 * IEM_XCPT_FLAGS_XXX), optional.
15532 * @param puErr Where to store the error code associated with the
15533 * event, optional.
15534 * @param puCr2 Where to store the CR2 associated with the event,
15535 * optional.
15536 * @remarks The caller should check the flags to determine if the error code and
15537 * CR2 are valid for the event.
15538 */
15539VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15540{
15541 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15542 if (fRaisingXcpt)
15543 {
15544 if (puVector)
15545 *puVector = pVCpu->iem.s.uCurXcpt;
15546 if (pfFlags)
15547 *pfFlags = pVCpu->iem.s.fCurXcpt;
15548 if (puErr)
15549 *puErr = pVCpu->iem.s.uCurXcptErr;
15550 if (puCr2)
15551 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15552 }
15553 return fRaisingXcpt;
15554}
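
/*
 * Illustrative sketch (not part of the original file): querying the event IEM
 * is currently delivering and honouring the flags before trusting the error
 * code and CR2 values.  The helper name and the logging are illustrative only.
 */
#if 0 /* example only */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  cr2 %#RX64\n", uCr2));
    }
}
#endif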
15555
15556#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15557
15558/**
15559 * Interface for HM and EM to emulate the CLGI instruction.
15560 *
15561 * @returns Strict VBox status code.
15562 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15563 * @param cbInstr The instruction length in bytes.
15564 * @thread EMT(pVCpu)
15565 */
15566VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15567{
15568 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15569
15570 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15571 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15572 Assert(!pVCpu->iem.s.cActiveMappings);
15573 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15574}
15575
15576
15577/**
15578 * Interface for HM and EM to emulate the STGI instruction.
15579 *
15580 * @returns Strict VBox status code.
15581 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15582 * @param cbInstr The instruction length in bytes.
15583 * @thread EMT(pVCpu)
15584 */
15585VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15586{
15587 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15588
15589 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15590 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15591 Assert(!pVCpu->iem.s.cActiveMappings);
15592 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15593}
15594
15595
15596/**
15597 * Interface for HM and EM to emulate the VMLOAD instruction.
15598 *
15599 * @returns Strict VBox status code.
15600 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15601 * @param cbInstr The instruction length in bytes.
15602 * @thread EMT(pVCpu)
15603 */
15604VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15605{
15606 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15607
15608 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15609 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15610 Assert(!pVCpu->iem.s.cActiveMappings);
15611 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15612}
15613
15614
15615/**
15616 * Interface for HM and EM to emulate the VMSAVE instruction.
15617 *
15618 * @returns Strict VBox status code.
15619 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15620 * @param cbInstr The instruction length in bytes.
15621 * @thread EMT(pVCpu)
15622 */
15623VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15624{
15625 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15626
15627 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15628 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15629 Assert(!pVCpu->iem.s.cActiveMappings);
15630 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15631}
15632
15633
15634/**
15635 * Interface for HM and EM to emulate the INVLPGA instruction.
15636 *
15637 * @returns Strict VBox status code.
15638 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15639 * @param cbInstr The instruction length in bytes.
15640 * @thread EMT(pVCpu)
15641 */
15642VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15643{
15644 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15645
15646 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15647 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15648 Assert(!pVCpu->iem.s.cActiveMappings);
15649 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15650}
15651
15652
15653/**
15654 * Interface for HM and EM to emulate the VMRUN instruction.
15655 *
15656 * @returns Strict VBox status code.
15657 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15658 * @param cbInstr The instruction length in bytes.
15659 * @thread EMT(pVCpu)
15660 */
15661VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15662{
15663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15664 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15665
15666 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15667 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15668 Assert(!pVCpu->iem.s.cActiveMappings);
15669 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15670}
15671
15672
15673/**
15674 * Interface for HM and EM to emulate \#VMEXIT.
15675 *
15676 * @returns Strict VBox status code.
15677 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15678 * @param uExitCode The exit code.
15679 * @param uExitInfo1 The exit info. 1 field.
15680 * @param uExitInfo2 The exit info. 2 field.
15681 * @thread EMT(pVCpu)
15682 */
15683VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15684{
15685 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15686 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15687 if (pVCpu->iem.s.cActiveMappings)
15688 iemMemRollback(pVCpu);
15689 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15690}
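
/*
 * Illustrative sketch (not part of the original file): how HM might hand an
 * intercepted event to IEM as a nested-guest #VMEXIT instead of handling it
 * itself.  The wrapper name is hypothetical; the exit code and info fields are
 * simply passed through to the interface above.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleForwardSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode,
                                            uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    /* IEM performs the world switch back to the outer guest; the caller only
       needs to propagate the strict status code. */
    return IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
}
#endif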
15691
15692#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15693
15694#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15695
15696/**
15697 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15698 *
15699 * @returns Strict VBox status code.
15700 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15701 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15702 * the x2APIC device.
15703 * @retval VERR_OUT_RANGE if the caller must raise \#GP(0).
15704 *
15705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15706 * @param idMsr The MSR being read.
15707 * @param pu64Value Pointer to the value being written or where to store the
15708 * value being read.
15709 * @param fWrite Whether this is an MSR write or read access.
15710 * @thread EMT(pVCpu)
15711 */
15712VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15713{
15714 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15715 Assert(pu64Value);
15716
15717 VBOXSTRICTRC rcStrict;
15718 if (!fWrite)
15719 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15720 else
15721 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15722 if (pVCpu->iem.s.cActiveMappings)
15723 iemMemRollback(pVCpu);
15724 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15725
15726}
15727
15728
15729/**
15730 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15731 *
15732 * @returns Strict VBox status code.
15733 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15734 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15735 *
15736 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15737 * @param offAccess The offset of the register being accessed (within the
15738 * APIC-access page).
15739 * @param cbAccess The size of the access in bytes.
15740 * @param pvData Pointer to the data being written or where to store the data
15741 * being read.
15742 * @param fWrite Whether this is a write or read access.
15743 * @thread EMT(pVCpu)
15744 */
15745VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15746 bool fWrite)
15747{
15748 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15749 Assert(pvData);
15750
15751 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15752 * accesses, so we only use read/write here. Maybe in the future the PGM
15753 * physical handler will be extended to include this information? */
15754 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15755 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15756 if (pVCpu->iem.s.cActiveMappings)
15757 iemMemRollback(pVCpu);
15758 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15759}
15760
15761
15762/**
15763 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15764 * VM-exit.
15765 *
15766 * @returns Strict VBox status code.
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @thread EMT(pVCpu)
15769 */
15770VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15771{
15772 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15773
15774 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15775 if (pVCpu->iem.s.cActiveMappings)
15776 iemMemRollback(pVCpu);
15777 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15778}
15779
15780
15781/**
15782 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15783 *
15784 * @returns Strict VBox status code.
15785 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15786 * @thread EMT(pVCpu)
15787 */
15788VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15789{
15790 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15791 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15792 if (pVCpu->iem.s.cActiveMappings)
15793 iemMemRollback(pVCpu);
15794 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15795}
15796
15797
15798/**
15799 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15800 *
15801 * @returns Strict VBox status code.
15802 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15803 * @param uVector The external interrupt vector.
15804 * @param fIntPending Whether the external interrupt is pending or
15805 *                      acknowledged in the interrupt controller.
15806 * @thread EMT(pVCpu)
15807 */
15808VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15809{
15810 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15811 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15812 if (pVCpu->iem.s.cActiveMappings)
15813 iemMemRollback(pVCpu);
15814 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15815}
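
/*
 * Illustrative sketch (not part of the original file): forwarding a pending
 * external interrupt to the nested guest as a VM-exit.  The wrapper name is
 * hypothetical; fIntPending=true means the interrupt has not yet been
 * acknowledged in the interrupt controller (see the parameter description
 * above).
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleExtIntVmexit(PVMCPU pVCpu, uint8_t uVector)
{
    return IEMExecVmxVmexitExtInt(pVCpu, uVector, true /*fIntPending*/);
}
#endif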
15816
15817
15818/**
15819 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15820 *
15821 * @returns Strict VBox status code.
15822 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15823 * @param uVector The SIPI vector.
15824 * @thread EMT(pVCpu)
15825 */
15826VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15827{
15828 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15829 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15830 if (pVCpu->iem.s.cActiveMappings)
15831 iemMemRollback(pVCpu);
15832 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15833}
15834
15835
15836/**
15837 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15838 *
15839 * @returns Strict VBox status code.
15840 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15841 * @thread EMT(pVCpu)
15842 */
15843VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15844{
15845 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15846 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15847 if (pVCpu->iem.s.cActiveMappings)
15848 iemMemRollback(pVCpu);
15849 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15850}
15851
15852
15853/**
15854 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15855 *
15856 * @returns Strict VBox status code.
15857 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15858 * @thread EMT(pVCpu)
15859 */
15860VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15861{
15862 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15863 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15864 if (pVCpu->iem.s.cActiveMappings)
15865 iemMemRollback(pVCpu);
15866 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15867}
15868
15869
15870/**
15871 * Interface for HM and EM to emulate VM-exits due to the Monitor-Trap Flag (MTF).
15872 *
15873 * @returns Strict VBox status code.
15874 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15875 * @thread EMT(pVCpu)
15876 */
15877VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15878{
15879 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15880 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15881 if (pVCpu->iem.s.cActiveMappings)
15882 iemMemRollback(pVCpu);
15883 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15884}
15885
15886
15887/**
15888 * Interface for HM and EM to emulate the VMREAD instruction.
15889 *
15890 * @returns Strict VBox status code.
15891 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15892 * @param pExitInfo Pointer to the VM-exit information struct.
15893 * @thread EMT(pVCpu)
15894 */
15895VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15896{
15897 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15898 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15899 Assert(pExitInfo);
15900
15901 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15902
15903 VBOXSTRICTRC rcStrict;
15904 uint8_t const cbInstr = pExitInfo->cbInstr;
15905 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15906 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15907 {
15908 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15909 {
15910 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15911 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15912 }
15913 else
15914 {
15915 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15916 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15917 }
15918 }
15919 else
15920 {
15921 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15922 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15923 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15924 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15925 }
15926 if (pVCpu->iem.s.cActiveMappings)
15927 iemMemRollback(pVCpu);
15928 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15929}
15930
15931
15932/**
15933 * Interface for HM and EM to emulate the VMWRITE instruction.
15934 *
15935 * @returns Strict VBox status code.
15936 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15937 * @param pExitInfo Pointer to the VM-exit information struct.
15938 * @thread EMT(pVCpu)
15939 */
15940VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15941{
15942 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15943 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15944 Assert(pExitInfo);
15945
15946 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15947
15948 uint64_t u64Val;
15949 uint8_t iEffSeg;
15950 IEMMODE enmEffAddrMode;
15951 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15952 {
15953 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15954 iEffSeg = UINT8_MAX;
15955 enmEffAddrMode = UINT8_MAX;
15956 }
15957 else
15958 {
15959 u64Val = pExitInfo->GCPtrEffAddr;
15960 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15961 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15962 }
15963 uint8_t const cbInstr = pExitInfo->cbInstr;
15964 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15965 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15966 if (pVCpu->iem.s.cActiveMappings)
15967 iemMemRollback(pVCpu);
15968 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15969}
15970
15971
15972/**
15973 * Interface for HM and EM to emulate the VMPTRLD instruction.
15974 *
15975 * @returns Strict VBox status code.
15976 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15977 * @param pExitInfo Pointer to the VM-exit information struct.
15978 * @thread EMT(pVCpu)
15979 */
15980VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15981{
15982 Assert(pExitInfo);
15983 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15984 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15985
15986 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15987
15988 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15989 uint8_t const cbInstr = pExitInfo->cbInstr;
15990 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15991 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15992 if (pVCpu->iem.s.cActiveMappings)
15993 iemMemRollback(pVCpu);
15994 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15995}
15996
15997
15998/**
15999 * Interface for HM and EM to emulate the VMPTRST instruction.
16000 *
16001 * @returns Strict VBox status code.
16002 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16003 * @param pExitInfo Pointer to the VM-exit information struct.
16004 * @thread EMT(pVCpu)
16005 */
16006VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16007{
16008 Assert(pExitInfo);
16009 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16010 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16011
16012 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16013
16014 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16015 uint8_t const cbInstr = pExitInfo->cbInstr;
16016 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16017 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16018 if (pVCpu->iem.s.cActiveMappings)
16019 iemMemRollback(pVCpu);
16020 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16021}
16022
16023
16024/**
16025 * Interface for HM and EM to emulate the VMCLEAR instruction.
16026 *
16027 * @returns Strict VBox status code.
16028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16029 * @param pExitInfo Pointer to the VM-exit information struct.
16030 * @thread EMT(pVCpu)
16031 */
16032VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16033{
16034 Assert(pExitInfo);
16035 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16036 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16037
16038 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16039
16040 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16041 uint8_t const cbInstr = pExitInfo->cbInstr;
16042 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16043 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16044 if (pVCpu->iem.s.cActiveMappings)
16045 iemMemRollback(pVCpu);
16046 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16047}
16048
16049
16050/**
16051 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16052 *
16053 * @returns Strict VBox status code.
16054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16055 * @param cbInstr The instruction length in bytes.
16056 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16057 * VMXINSTRID_VMRESUME).
16058 * @thread EMT(pVCpu)
16059 */
16060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16061{
16062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16063 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16064
16065 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16066 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16067 if (pVCpu->iem.s.cActiveMappings)
16068 iemMemRollback(pVCpu);
16069 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16070}
16071
16072
16073/**
16074 * Interface for HM and EM to emulate the VMXON instruction.
16075 *
16076 * @returns Strict VBox status code.
16077 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16078 * @param pExitInfo Pointer to the VM-exit information struct.
16079 * @thread EMT(pVCpu)
16080 */
16081VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16082{
16083 Assert(pExitInfo);
16084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16085 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16086
16087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16088
16089 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16090 uint8_t const cbInstr = pExitInfo->cbInstr;
16091 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16092 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16093 if (pVCpu->iem.s.cActiveMappings)
16094 iemMemRollback(pVCpu);
16095 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16096}
16097
16098
16099/**
16100 * Interface for HM and EM to emulate the VMXOFF instruction.
16101 *
16102 * @returns Strict VBox status code.
16103 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16104 * @param cbInstr The instruction length in bytes.
16105 * @thread EMT(pVCpu)
16106 */
16107VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16108{
16109 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16110 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16111
16112 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16113 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16114 Assert(!pVCpu->iem.s.cActiveMappings);
16115 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16116}
16117
16118
16119/**
16120 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16121 *
16122 * @remarks The @a pvUser argument is currently unused.
16123 */
16124PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16125 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16126 PGMACCESSORIGIN enmOrigin, void *pvUser)
16127{
16128 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16129
16130 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16131 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16132 {
16133 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16134 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16135
16136 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16137 * Currently they will go through as read accesses. */
16138 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16139 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16140 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16141 if (RT_FAILURE(rcStrict))
16142 return rcStrict;
16143
16144 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16145 return VINF_SUCCESS;
16146 }
16147
16148 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16149 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16150 if (RT_FAILURE(rc))
16151 return rc;
16152
16153 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16154 return VINF_PGM_HANDLER_DO_DEFAULT;
16155}
16156
16157#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16158
16159#ifdef IN_RING3
16160
16161/**
16162 * Handles the unlikely and probably fatal merge cases.
16163 *
16164 * @returns Merged status code.
16165 * @param rcStrict Current EM status code.
16166 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16167 * with @a rcStrict.
16168 * @param iMemMap The memory mapping index. For error reporting only.
16169 * @param pVCpu The cross context virtual CPU structure of the calling
16170 * thread, for error reporting only.
16171 */
16172DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16173 unsigned iMemMap, PVMCPU pVCpu)
16174{
16175 if (RT_FAILURE_NP(rcStrict))
16176 return rcStrict;
16177
16178 if (RT_FAILURE_NP(rcStrictCommit))
16179 return rcStrictCommit;
16180
16181 if (rcStrict == rcStrictCommit)
16182 return rcStrictCommit;
16183
16184 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16185 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16186 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16187 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16188 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16189 return VERR_IOM_FF_STATUS_IPE;
16190}
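
/*
 * Illustrative examples only: the slow path above boils down to "a failure
 * status on either side wins, identical codes are fine, and any other
 * combination is an internal processing error".  The status codes below are
 * chosen purely for illustration.
 *
 * @code
 *     iemR3MergeStatusSlow(VERR_ACCESS_DENIED, VINF_EM_RESET,      0, pVCpu); // -> VERR_ACCESS_DENIED
 *     iemR3MergeStatusSlow(VINF_EM_RESET,      VERR_ACCESS_DENIED, 0, pVCpu); // -> VERR_ACCESS_DENIED
 *     iemR3MergeStatusSlow(VINF_EM_DBG_STOP,   VINF_EM_DBG_STOP,   0, pVCpu); // -> VINF_EM_DBG_STOP
 *     // Two different non-failure codes that cannot be reconciled assert and
 *     // yield VERR_IOM_FF_STATUS_IPE.
 * @endcode
 */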
16191
16192
16193/**
16194 * Helper for IEMR3ProcessForceFlag.
16195 *
16196 * @returns Merged status code.
16197 * @param rcStrict Current EM status code.
16198 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16199 * with @a rcStrict.
16200 * @param iMemMap The memory mapping index. For error reporting only.
16201 * @param pVCpu The cross context virtual CPU structure of the calling
16202 * thread, for error reporting only.
16203 */
16204DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16205{
16206 /* Simple. */
16207 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16208 return rcStrictCommit;
16209
16210 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16211 return rcStrict;
16212
16213 /* EM scheduling status codes. */
16214 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16215 && rcStrict <= VINF_EM_LAST))
16216 {
16217 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16218 && rcStrictCommit <= VINF_EM_LAST))
16219 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16220 }
16221
16222 /* Unlikely */
16223 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16224}
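
/*
 * Illustrative examples only: concrete merge outcomes implied by the logic
 * above.  When both sides carry EM scheduling codes, the "smaller value wins"
 * comparison picks the higher-priority one, since the VINF_EM_* codes are
 * ordered by importance.
 *
 * @code
 *     iemR3MergeStatus(VINF_SUCCESS,    VINF_EM_RESET, 0, pVCpu); // -> VINF_EM_RESET (commit status wins)
 *     iemR3MergeStatus(VINF_EM_HALT,    VINF_SUCCESS,  0, pVCpu); // -> VINF_EM_HALT  (current status kept)
 *     iemR3MergeStatus(VINF_EM_SUSPEND, VINF_EM_HALT,  0, pVCpu); // -> whichever of the two has the lower value
 * @endcode
 */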
16225
16226
16227/**
16228 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16229 *
16230 * @returns Merge between @a rcStrict and what the commit operation returned.
16231 * @param pVM The cross context VM structure.
16232 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16233 * @param rcStrict The status code returned by ring-0 or raw-mode.
16234 */
16235VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16236{
16237 /*
16238 * Reset the pending commit.
16239 */
16240 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16241 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16242 ("%#x %#x %#x\n",
16243 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16244 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16245
16246 /*
16247 * Commit the pending bounce buffers (usually just one).
16248 */
16249 unsigned cBufs = 0;
16250 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16251 while (iMemMap-- > 0)
16252 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16253 {
16254 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16255 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16256 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16257
16258 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16259 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16260 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16261
16262 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16263 {
16264 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16265 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16266 pbBuf,
16267 cbFirst,
16268 PGMACCESSORIGIN_IEM);
16269 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16270 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16271 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16272 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16273 }
16274
16275 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16276 {
16277 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16278 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16279 pbBuf + cbFirst,
16280 cbSecond,
16281 PGMACCESSORIGIN_IEM);
16282 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16283 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16284 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16285 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16286 }
16287 cBufs++;
16288 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16289 }
16290
16291 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16292 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16293 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16294 pVCpu->iem.s.cActiveMappings = 0;
16295 return rcStrict;
16296}
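
/*
 * Illustrative sketch only: a minimal view of the caller side.  The exact spot
 * in EM's force-flag processing and the local variable names are assumptions;
 * the contract is simply that whoever sees VMCPU_FF_IEM set lets IEM commit its
 * pending bounce-buffer writes and merges the result into the status code it is
 * about to return.
 *
 * @code
 *     // Somewhere in the ring-3 force-flag handling:
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 * @endcode
 */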
16297
16298#endif /* IN_RING3 */
16299