source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 75249

Last change on this file since 75249 was 75201, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; preemption timer interfaces. Using them is todo.

1/* $Id: IEMAll.cpp 75201 2018-10-31 09:05:52Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
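
/*
 * Illustrative only (not taken from this file): how the levels listed above
 * are typically emitted with the VBox logging macros once LOG_GROUP is set to
 * LOG_GROUP_IEM.  The format arguments (uCs, uRip, GCPtrPC) are hypothetical.
 *
 *     Log(("iemExample: raising #GP(0)\n"));                           // level 1: major events
 *     LogFlow(("iemExample: enter cs:rip=%04x:%RX64\n", uCs, uRip));   // flow: enter/exit info
 *     Log4(("decode - example mnemonic at %RGv\n", GCPtrPC));          // level 4: mnemonics w/ EIP
 */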
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/asm-math.h>
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125
126/*********************************************************************************************************************************
127* Structures and Typedefs *
128*********************************************************************************************************************************/
129/** @typedef PFNIEMOP
130 * Pointer to an opcode decoder function.
131 */
132
133/** @def FNIEMOP_DEF
134 * Define an opcode decoder function.
135 *
136 * We're using macros for this so that adding and removing parameters as well as
137 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
138 *
139 * @param a_Name The function name.
140 */
141
142/** @typedef PFNIEMOPRM
143 * Pointer to an opcode decoder function with RM byte.
144 */
145
146/** @def FNIEMOPRM_DEF
147 * Define an opcode decoder function with RM byte.
148 *
149 * We're using macros for this so that adding and removing parameters as well as
150 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
151 *
152 * @param a_Name The function name.
153 */
154
155#if defined(__GNUC__) && defined(RT_ARCH_X86)
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
157typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
158# define FNIEMOP_DEF(a_Name) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
160# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
162# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
163 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
164
165#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
167typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#elif defined(__GNUC__)
176typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
177typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
178# define FNIEMOP_DEF(a_Name) \
179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
180# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
181 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
182# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
183 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
184
185#else
186typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
187typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
188# define FNIEMOP_DEF(a_Name) \
189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
190# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
191 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
192# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
193 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
194
195#endif
196#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
197
198
199/**
200 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
201 */
202typedef union IEMSELDESC
203{
204 /** The legacy view. */
205 X86DESC Legacy;
206 /** The long mode view. */
207 X86DESC64 Long;
208} IEMSELDESC;
209/** Pointer to a selector descriptor table entry. */
210typedef IEMSELDESC *PIEMSELDESC;
211
212/**
213 * CPU exception classes.
214 */
215typedef enum IEMXCPTCLASS
216{
217 IEMXCPTCLASS_BENIGN,
218 IEMXCPTCLASS_CONTRIBUTORY,
219 IEMXCPTCLASS_PAGE_FAULT,
220 IEMXCPTCLASS_DOUBLE_FAULT
221} IEMXCPTCLASS;
222
223
224/*********************************************************************************************************************************
225* Defined Constants And Macros *
226*********************************************************************************************************************************/
227/** @def IEM_WITH_SETJMP
228 * Enables alternative status code handling using setjmps.
229 *
230 * This adds a bit of expense via the setjmp() call since it saves all the
231 * non-volatile registers. However, it eliminates return code checks and allows
232 * for more optimal return value passing (return regs instead of stack buffer).
233 */
234#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
235# define IEM_WITH_SETJMP
236#endif
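
/*
 * Conceptual sketch of the IEM_WITH_SETJMP idea described above.  This is NOT
 * the actual IEM implementation (that lives in the fetch/raise helpers further
 * down); it only illustrates, with plain setjmp/longjmp and hypothetical
 * function names, why the success paths need no per-call status checks:
 *
 *     #include <setjmp.h>
 *
 *     static jmp_buf s_JmpBuf;                         // one buffer per invocation in practice
 *
 *     static uint32_t sketchFetchU32(bool fFail)
 *     {
 *         if (fFail)
 *             longjmp(s_JmpBuf, VERR_ACCESS_DENIED);   // error: unwind straight back to setjmp
 *         return UINT32_C(0x12345678);                 // success: return the value directly
 *     }
 *
 *     static int sketchRunOne(void)
 *     {
 *         int rc = setjmp(s_JmpBuf);                   // saves the non-volatile registers once
 *         if (rc == 0)
 *         {
 *             uint32_t const uValue = sketchFetchU32(false);
 *             NOREF(uValue);
 *             return VINF_SUCCESS;                     // no rcStrict check after each fetch
 *         }
 *         return rc;                                   // the longjmp delivered the status here
 *     }
 */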
237
238/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
239 * due to GCC lacking knowledge about the value range of a switch. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
241
242/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
244
245/**
246 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
247 * occasion.
248 */
249#ifdef LOG_ENABLED
250# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
251 do { \
252 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
254 } while (0)
255#else
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
257 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
258#endif
259
260/**
261 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
262 * occasion using the supplied logger statement.
263 *
264 * @param a_LoggerArgs What to log on failure.
265 */
266#ifdef LOG_ENABLED
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
268 do { \
269 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
270 /*LogFunc(a_LoggerArgs);*/ \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
272 } while (0)
273#else
274# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
275 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
276#endif
277
278/**
279 * Call an opcode decoder function.
280 *
281 * We're using macros for this so that adding and removing parameters can be
282 * done as we please. See FNIEMOP_DEF.
283 */
284#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
285
286/**
287 * Call a common opcode decoder function taking one extra argument.
288 *
289 * We're using macros for this so that adding and removing parameters can be
290 * done as we please. See FNIEMOP_DEF_1.
291 */
292#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
293
294/**
295 * Call a common opcode decoder function taking two extra arguments.
296 *
297 * We're using macros for this so that adding and removing parameters can be
298 * done as we please. See FNIEMOP_DEF_2.
299 */
300#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
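
/*
 * Illustrative only: how a decoder function is declared with FNIEMOP_DEF and
 * invoked through FNIEMOP_CALL.  The function name iemOp_ExampleNop is
 * hypothetical and not part of the real opcode tables:
 *
 *     FNIEMOP_DEF(iemOp_ExampleNop)
 *     {
 *         // A real decoder would fetch further opcode/ModR/M bytes here and
 *         // dispatch to an iemAImpl/iemCImpl worker; this sketch just succeeds.
 *         NOREF(pVCpu);
 *         return VINF_SUCCESS;
 *     }
 *
 *     // From a dispatcher that has pVCpu in scope:
 *     //     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOp_ExampleNop);
 */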
301
302/**
303 * Check if we're currently executing in real or virtual 8086 mode.
304 *
305 * @returns @c true if it is, @c false if not.
306 * @param a_pVCpu The IEM state of the current CPU.
307 */
308#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
309
310/**
311 * Check if we're currently executing in virtual 8086 mode.
312 *
313 * @returns @c true if it is, @c false if not.
314 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
315 */
316#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
317
318/**
319 * Check if we're currently executing in long mode.
320 *
321 * @returns @c true if it is, @c false if not.
322 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
323 */
324#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
325
326/**
327 * Check if we're currently executing in a 64-bit code segment.
328 *
329 * @returns @c true if it is, @c false if not.
330 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
331 */
332#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
333
334/**
335 * Check if we're currently executing in real mode.
336 *
337 * @returns @c true if it is, @c false if not.
338 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
339 */
340#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
341
342/**
343 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
344 * @returns PCCPUMFEATURES
345 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
346 */
347#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
348
349/**
350 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
351 * @returns PCCPUMFEATURES
352 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
353 */
354#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
355
356/**
357 * Evaluates to true if we're presenting an Intel CPU to the guest.
358 */
359#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
360
361/**
362 * Evaluates to true if we're presenting an AMD CPU to the guest.
363 */
364#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
365
366/**
367 * Check if the address is canonical.
368 */
369#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
370
371/**
372 * Gets the effective VEX.VVVV value.
373 *
374 * The 4th bit is ignored if not 64-bit code.
375 * @returns effective V-register value.
376 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
377 */
378#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
379 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
380
381/** @def IEM_USE_UNALIGNED_DATA_ACCESS
382 * Use unaligned accesses instead of elaborate byte assembly. */
383#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
384# define IEM_USE_UNALIGNED_DATA_ACCESS
385#endif
386
387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
388
389/**
390 * Check if the guest has entered VMX root operation.
391 */
392# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
393
394/**
395 * Check if the guest has entered VMX non-root operation.
396 */
397# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
398
399/**
400 * Check if the nested-guest has the given Pin-based VM-execution control set.
401 */
402# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
403 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
404
405/**
406 * Check if the nested-guest has the given Processor-based VM-execution control set.
407 */
408# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
409 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
410
411/**
412 * Check if the nested-guest has the given Secondary Processor-based VM-execution
413 * control set.
414 */
415# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
416 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
417
418/**
419 * Invokes the VMX VM-exit handler for an instruction intercept.
420 */
421# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
422 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
423
424/**
425 * Invokes the VMX VM-exit handler for an instruction intercept where the
426 * instruction provides additional VM-exit information.
427 */
428# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
429 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
430
431/**
432 * Invokes the VMX VM-exit handler for a task switch.
433 */
434# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
435 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
436
437/**
438 * Invokes the VMX VM-exit handler for MWAIT.
439 */
440# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
441 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
442
443/**
444 * Invokes the VMX VM-exit handler for triple faults.
445 */
446# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
447 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
448
449#else
450# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
451# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
452# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
453# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
454# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
455# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
458# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
460
461#endif
462
463#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
464/**
465 * Check if an SVM control/instruction intercept is set.
466 */
467# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
468 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
469
470/**
471 * Check if an SVM read CRx intercept is set.
472 */
473# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
474 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
475
476/**
477 * Check if an SVM write CRx intercept is set.
478 */
479# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
480 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
481
482/**
483 * Check if an SVM read DRx intercept is set.
484 */
485# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
486 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
487
488/**
489 * Check if an SVM write DRx intercept is set.
490 */
491# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
492 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
493
494/**
495 * Check if an SVM exception intercept is set.
496 */
497# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
498 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
499
500/**
501 * Invokes the SVM \#VMEXIT handler for the nested-guest.
502 */
503# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
504 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
505
506/**
507 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
508 * corresponding decode assist information.
509 */
510# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
511 do \
512 { \
513 uint64_t uExitInfo1; \
514 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
515 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
516 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
517 else \
518 uExitInfo1 = 0; \
519 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
520 } while (0)
521
522/** Checks and handles the SVM nested-guest instruction intercept and updates
523 * NRIP if needed.
524 */
525# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
526 do \
527 { \
528 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
529 { \
530 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
531 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
532 } \
533 } while (0)
534
535/** Checks and handles SVM nested-guest CR0 read intercept. */
536# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
537 do \
538 { \
539 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
540 { /* probably likely */ } \
541 else \
542 { \
543 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
544 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
545 } \
546 } while (0)
547
548/**
549 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
550 */
551# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
552 do { \
553 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
554 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
555 } while (0)
556
557#else
558# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
559# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
560# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
561# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
562# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
563# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
564# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
565# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
566# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
567# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
568# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
569
570#endif
571
572
573/*********************************************************************************************************************************
574* Global Variables *
575*********************************************************************************************************************************/
576extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
577
578
579/** Function table for the ADD instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
581{
582 iemAImpl_add_u8, iemAImpl_add_u8_locked,
583 iemAImpl_add_u16, iemAImpl_add_u16_locked,
584 iemAImpl_add_u32, iemAImpl_add_u32_locked,
585 iemAImpl_add_u64, iemAImpl_add_u64_locked
586};
587
588/** Function table for the ADC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
590{
591 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
592 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
593 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
594 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
595};
596
597/** Function table for the SUB instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
599{
600 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
601 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
602 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
603 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
604};
605
606/** Function table for the SBB instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
608{
609 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
610 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
611 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
612 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
613};
614
615/** Function table for the OR instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
617{
618 iemAImpl_or_u8, iemAImpl_or_u8_locked,
619 iemAImpl_or_u16, iemAImpl_or_u16_locked,
620 iemAImpl_or_u32, iemAImpl_or_u32_locked,
621 iemAImpl_or_u64, iemAImpl_or_u64_locked
622};
623
624/** Function table for the XOR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
626{
627 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
628 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
629 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
630 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
631};
632
633/** Function table for the AND instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
635{
636 iemAImpl_and_u8, iemAImpl_and_u8_locked,
637 iemAImpl_and_u16, iemAImpl_and_u16_locked,
638 iemAImpl_and_u32, iemAImpl_and_u32_locked,
639 iemAImpl_and_u64, iemAImpl_and_u64_locked
640};
641
642/** Function table for the CMP instruction.
643 * @remarks Making operand order ASSUMPTIONS.
644 */
645IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
646{
647 iemAImpl_cmp_u8, NULL,
648 iemAImpl_cmp_u16, NULL,
649 iemAImpl_cmp_u32, NULL,
650 iemAImpl_cmp_u64, NULL
651};
652
653/** Function table for the TEST instruction.
654 * @remarks Making operand order ASSUMPTIONS.
655 */
656IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
657{
658 iemAImpl_test_u8, NULL,
659 iemAImpl_test_u16, NULL,
660 iemAImpl_test_u32, NULL,
661 iemAImpl_test_u64, NULL
662};
663
664/** Function table for the BT instruction. */
665IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
666{
667 NULL, NULL,
668 iemAImpl_bt_u16, NULL,
669 iemAImpl_bt_u32, NULL,
670 iemAImpl_bt_u64, NULL
671};
672
673/** Function table for the BTC instruction. */
674IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
675{
676 NULL, NULL,
677 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
678 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
679 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
680};
681
682/** Function table for the BTR instruction. */
683IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
684{
685 NULL, NULL,
686 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
687 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
688 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
689};
690
691/** Function table for the BTS instruction. */
692IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
693{
694 NULL, NULL,
695 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
696 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
697 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
698};
699
700/** Function table for the BSF instruction. */
701IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
702{
703 NULL, NULL,
704 iemAImpl_bsf_u16, NULL,
705 iemAImpl_bsf_u32, NULL,
706 iemAImpl_bsf_u64, NULL
707};
708
709/** Function table for the BSR instruction. */
710IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
711{
712 NULL, NULL,
713 iemAImpl_bsr_u16, NULL,
714 iemAImpl_bsr_u32, NULL,
715 iemAImpl_bsr_u64, NULL
716};
717
718/** Function table for the IMUL instruction. */
719IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
720{
721 NULL, NULL,
722 iemAImpl_imul_two_u16, NULL,
723 iemAImpl_imul_two_u32, NULL,
724 iemAImpl_imul_two_u64, NULL
725};
726
727/** Group 1 /r lookup table. */
728IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
729{
730 &g_iemAImpl_add,
731 &g_iemAImpl_or,
732 &g_iemAImpl_adc,
733 &g_iemAImpl_sbb,
734 &g_iemAImpl_and,
735 &g_iemAImpl_sub,
736 &g_iemAImpl_xor,
737 &g_iemAImpl_cmp
738};
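
/*
 * Illustrative only: the group 1 table above is indexed by the ModR/M reg
 * field (/0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP).
 * A sketch of the lookup, with bRm standing for a fetched ModR/M byte:
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 */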
739
740/** Function table for the INC instruction. */
741IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
742{
743 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
744 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
745 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
746 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
747};
748
749/** Function table for the DEC instruction. */
750IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
751{
752 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
753 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
754 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
755 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
756};
757
758/** Function table for the NEG instruction. */
759IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
760{
761 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
762 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
763 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
764 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
765};
766
767/** Function table for the NOT instruction. */
768IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
769{
770 iemAImpl_not_u8, iemAImpl_not_u8_locked,
771 iemAImpl_not_u16, iemAImpl_not_u16_locked,
772 iemAImpl_not_u32, iemAImpl_not_u32_locked,
773 iemAImpl_not_u64, iemAImpl_not_u64_locked
774};
775
776
777/** Function table for the ROL instruction. */
778IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
779{
780 iemAImpl_rol_u8,
781 iemAImpl_rol_u16,
782 iemAImpl_rol_u32,
783 iemAImpl_rol_u64
784};
785
786/** Function table for the ROR instruction. */
787IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
788{
789 iemAImpl_ror_u8,
790 iemAImpl_ror_u16,
791 iemAImpl_ror_u32,
792 iemAImpl_ror_u64
793};
794
795/** Function table for the RCL instruction. */
796IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
797{
798 iemAImpl_rcl_u8,
799 iemAImpl_rcl_u16,
800 iemAImpl_rcl_u32,
801 iemAImpl_rcl_u64
802};
803
804/** Function table for the RCR instruction. */
805IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
806{
807 iemAImpl_rcr_u8,
808 iemAImpl_rcr_u16,
809 iemAImpl_rcr_u32,
810 iemAImpl_rcr_u64
811};
812
813/** Function table for the SHL instruction. */
814IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
815{
816 iemAImpl_shl_u8,
817 iemAImpl_shl_u16,
818 iemAImpl_shl_u32,
819 iemAImpl_shl_u64
820};
821
822/** Function table for the SHR instruction. */
823IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
824{
825 iemAImpl_shr_u8,
826 iemAImpl_shr_u16,
827 iemAImpl_shr_u32,
828 iemAImpl_shr_u64
829};
830
831/** Function table for the SAR instruction. */
832IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
833{
834 iemAImpl_sar_u8,
835 iemAImpl_sar_u16,
836 iemAImpl_sar_u32,
837 iemAImpl_sar_u64
838};
839
840
841/** Function table for the MUL instruction. */
842IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
843{
844 iemAImpl_mul_u8,
845 iemAImpl_mul_u16,
846 iemAImpl_mul_u32,
847 iemAImpl_mul_u64
848};
849
850/** Function table for the IMUL instruction working implicitly on rAX. */
851IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
852{
853 iemAImpl_imul_u8,
854 iemAImpl_imul_u16,
855 iemAImpl_imul_u32,
856 iemAImpl_imul_u64
857};
858
859/** Function table for the DIV instruction. */
860IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
861{
862 iemAImpl_div_u8,
863 iemAImpl_div_u16,
864 iemAImpl_div_u32,
865 iemAImpl_div_u64
866};
867
868/** Function table for the IDIV instruction. */
869IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
870{
871 iemAImpl_idiv_u8,
872 iemAImpl_idiv_u16,
873 iemAImpl_idiv_u32,
874 iemAImpl_idiv_u64
875};
876
877/** Function table for the SHLD instruction */
878IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
879{
880 iemAImpl_shld_u16,
881 iemAImpl_shld_u32,
882 iemAImpl_shld_u64,
883};
884
885/** Function table for the SHRD instruction */
886IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
887{
888 iemAImpl_shrd_u16,
889 iemAImpl_shrd_u32,
890 iemAImpl_shrd_u64,
891};
892
893
894/** Function table for the PUNPCKLBW instruction */
895IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
896/** Function table for the PUNPCKLWD instruction */
897IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
898/** Function table for the PUNPCKLDQ instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
900/** Function table for the PUNPCKLQDQ instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
902
903/** Function table for the PUNPCKHBW instruction */
904IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
905/** Function table for the PUNPCKHWD instruction */
906IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
907/** Function table for the PUNPCKHDQ instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
909/** Function table for the PUNPCKHQDQ instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
911
912/** Function table for the PXOR instruction */
913IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
914/** Function table for the PCMPEQB instruction */
915IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
916/** Function table for the PCMPEQW instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
918/** Function table for the PCMPEQD instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
920
921
922#if defined(IEM_LOG_MEMORY_WRITES)
923/** What IEM just wrote. */
924uint8_t g_abIemWrote[256];
925/** How much IEM just wrote. */
926size_t g_cbIemWrote;
927#endif
928
929
930/*********************************************************************************************************************************
931* Internal Functions *
932*********************************************************************************************************************************/
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
935IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
937/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
938IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
939IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
941IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
944IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
946IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
947IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
948IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
950#ifdef IEM_WITH_SETJMP
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
956#endif
957
958IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
959IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
970IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
972IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
973IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
974IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
975
976#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
979 uint8_t cbInstr);
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
986#endif
987
988#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
989IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
990IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr,
991 uint64_t uCr2);
992#endif
993
994
995/**
996 * Sets the pass up status.
997 *
998 * @returns VINF_SUCCESS.
999 * @param pVCpu The cross context virtual CPU structure of the
1000 * calling thread.
1001 * @param rcPassUp The pass up status. Must be informational.
1002 * VINF_SUCCESS is not allowed.
1003 */
1004IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1005{
1006 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1007
1008 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1009 if (rcOldPassUp == VINF_SUCCESS)
1010 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1011 /* If both are EM scheduling codes, use EM priority rules. */
1012 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1013 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1014 {
1015 if (rcPassUp < rcOldPassUp)
1016 {
1017 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1018 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1019 }
1020 else
1021 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 }
1023 /* Override EM scheduling with specific status code. */
1024 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1025 {
1026 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1028 }
1029 /* Don't override specific status code, first come first served. */
1030 else
1031 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1032 return VINF_SUCCESS;
1033}
1034
1035
1036/**
1037 * Calculates the CPU mode.
1038 *
1039 * This is mainly for updating IEMCPU::enmCpuMode.
1040 *
1041 * @returns CPU mode.
1042 * @param pVCpu The cross context virtual CPU structure of the
1043 * calling thread.
1044 */
1045DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1046{
1047 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1048 return IEMMODE_64BIT;
1049 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1050 return IEMMODE_32BIT;
1051 return IEMMODE_16BIT;
1052}
1053
1054
1055/**
1056 * Initializes the execution state.
1057 *
1058 * @param pVCpu The cross context virtual CPU structure of the
1059 * calling thread.
1060 * @param fBypassHandlers Whether to bypass access handlers.
1061 *
1062 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1063 * side-effects in strict builds.
1064 */
1065DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1066{
1067 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1068 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1069
1070#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1079#endif
1080
1081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1082 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1083#endif
1084 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1085 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1086#ifdef VBOX_STRICT
1087 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1088 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1089 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1090 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1091 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1092 pVCpu->iem.s.uRexReg = 127;
1093 pVCpu->iem.s.uRexB = 127;
1094 pVCpu->iem.s.offModRm = 127;
1095 pVCpu->iem.s.uRexIndex = 127;
1096 pVCpu->iem.s.iEffSeg = 127;
1097 pVCpu->iem.s.idxPrefix = 127;
1098 pVCpu->iem.s.uVex3rdReg = 127;
1099 pVCpu->iem.s.uVexLength = 127;
1100 pVCpu->iem.s.fEvexStuff = 127;
1101 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1102# ifdef IEM_WITH_CODE_TLB
1103 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1104 pVCpu->iem.s.pbInstrBuf = NULL;
1105 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1106 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1107 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1108 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1109# else
1110 pVCpu->iem.s.offOpcode = 127;
1111 pVCpu->iem.s.cbOpcode = 127;
1112# endif
1113#endif
1114
1115 pVCpu->iem.s.cActiveMappings = 0;
1116 pVCpu->iem.s.iNextMapping = 0;
1117 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1118 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1119#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1120 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1121 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1122 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1123 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1124 if (!pVCpu->iem.s.fInPatchCode)
1125 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1126#endif
1127}
1128
1129#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1130/**
1131 * Performs a minimal reinitialization of the execution state.
1132 *
1133 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1134 * 'world-switch' type operations on the CPU. Currently only nested
1135 * hardware-virtualization uses it.
1136 *
1137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1138 */
1139IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1140{
1141 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1142 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1143
1144 pVCpu->iem.s.uCpl = uCpl;
1145 pVCpu->iem.s.enmCpuMode = enmMode;
1146 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1147 pVCpu->iem.s.enmEffAddrMode = enmMode;
1148 if (enmMode != IEMMODE_64BIT)
1149 {
1150 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1151 pVCpu->iem.s.enmEffOpSize = enmMode;
1152 }
1153 else
1154 {
1155 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1156 pVCpu->iem.s.enmEffOpSize = enmMode;
1157 }
1158 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1159#ifndef IEM_WITH_CODE_TLB
1160 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1161 pVCpu->iem.s.offOpcode = 0;
1162 pVCpu->iem.s.cbOpcode = 0;
1163#endif
1164 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1165}
1166#endif
1167
1168/**
1169 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1170 *
1171 * @param pVCpu The cross context virtual CPU structure of the
1172 * calling thread.
1173 */
1174DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1175{
1176 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1177#ifdef VBOX_STRICT
1178# ifdef IEM_WITH_CODE_TLB
1179 NOREF(pVCpu);
1180# else
1181 pVCpu->iem.s.cbOpcode = 0;
1182# endif
1183#else
1184 NOREF(pVCpu);
1185#endif
1186}
1187
1188
1189/**
1190 * Initializes the decoder state.
1191 *
1192 * iemReInitDecoder is mostly a copy of this function.
1193 *
1194 * @param pVCpu The cross context virtual CPU structure of the
1195 * calling thread.
1196 * @param fBypassHandlers Whether to bypass access handlers.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202
1203#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1212#endif
1213
1214#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1215 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1216#endif
1217 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1218 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1219 pVCpu->iem.s.enmCpuMode = enmMode;
1220 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1221 pVCpu->iem.s.enmEffAddrMode = enmMode;
1222 if (enmMode != IEMMODE_64BIT)
1223 {
1224 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1225 pVCpu->iem.s.enmEffOpSize = enmMode;
1226 }
1227 else
1228 {
1229 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1230 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1231 }
1232 pVCpu->iem.s.fPrefixes = 0;
1233 pVCpu->iem.s.uRexReg = 0;
1234 pVCpu->iem.s.uRexB = 0;
1235 pVCpu->iem.s.uRexIndex = 0;
1236 pVCpu->iem.s.idxPrefix = 0;
1237 pVCpu->iem.s.uVex3rdReg = 0;
1238 pVCpu->iem.s.uVexLength = 0;
1239 pVCpu->iem.s.fEvexStuff = 0;
1240 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1241#ifdef IEM_WITH_CODE_TLB
1242 pVCpu->iem.s.pbInstrBuf = NULL;
1243 pVCpu->iem.s.offInstrNextByte = 0;
1244 pVCpu->iem.s.offCurInstrStart = 0;
1245# ifdef VBOX_STRICT
1246 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1247 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1248 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1249# endif
1250#else
1251 pVCpu->iem.s.offOpcode = 0;
1252 pVCpu->iem.s.cbOpcode = 0;
1253#endif
1254 pVCpu->iem.s.offModRm = 0;
1255 pVCpu->iem.s.cActiveMappings = 0;
1256 pVCpu->iem.s.iNextMapping = 0;
1257 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1258 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1259#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1260 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1261 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1262 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1263 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1264 if (!pVCpu->iem.s.fInPatchCode)
1265 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1266#endif
1267
1268#ifdef DBGFTRACE_ENABLED
1269 switch (enmMode)
1270 {
1271 case IEMMODE_64BIT:
1272 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1273 break;
1274 case IEMMODE_32BIT:
1275 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1276 break;
1277 case IEMMODE_16BIT:
1278 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1279 break;
1280 }
1281#endif
1282}
1283
1284
1285/**
1286 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1287 *
1288 * This is mostly a copy of iemInitDecoder.
1289 *
1290 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1291 */
1292DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1293{
1294 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1295
1296#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1305#endif
1306
1307 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1308 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1309 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1310 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1311 pVCpu->iem.s.enmEffAddrMode = enmMode;
1312 if (enmMode != IEMMODE_64BIT)
1313 {
1314 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1315 pVCpu->iem.s.enmEffOpSize = enmMode;
1316 }
1317 else
1318 {
1319 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1320 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1321 }
1322 pVCpu->iem.s.fPrefixes = 0;
1323 pVCpu->iem.s.uRexReg = 0;
1324 pVCpu->iem.s.uRexB = 0;
1325 pVCpu->iem.s.uRexIndex = 0;
1326 pVCpu->iem.s.idxPrefix = 0;
1327 pVCpu->iem.s.uVex3rdReg = 0;
1328 pVCpu->iem.s.uVexLength = 0;
1329 pVCpu->iem.s.fEvexStuff = 0;
1330 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1331#ifdef IEM_WITH_CODE_TLB
1332 if (pVCpu->iem.s.pbInstrBuf)
1333 {
1334 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1335 - pVCpu->iem.s.uInstrBufPc;
1336 if (off < pVCpu->iem.s.cbInstrBufTotal)
1337 {
1338 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1339 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1340 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1341 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1342 else
1343 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1344 }
1345 else
1346 {
1347 pVCpu->iem.s.pbInstrBuf = NULL;
1348 pVCpu->iem.s.offInstrNextByte = 0;
1349 pVCpu->iem.s.offCurInstrStart = 0;
1350 pVCpu->iem.s.cbInstrBuf = 0;
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 }
1353 }
1354 else
1355 {
1356 pVCpu->iem.s.offInstrNextByte = 0;
1357 pVCpu->iem.s.offCurInstrStart = 0;
1358 pVCpu->iem.s.cbInstrBuf = 0;
1359 pVCpu->iem.s.cbInstrBufTotal = 0;
1360 }
1361#else
1362 pVCpu->iem.s.cbOpcode = 0;
1363 pVCpu->iem.s.offOpcode = 0;
1364#endif
1365 pVCpu->iem.s.offModRm = 0;
1366 Assert(pVCpu->iem.s.cActiveMappings == 0);
1367 pVCpu->iem.s.iNextMapping = 0;
1368 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1369 Assert(pVCpu->iem.s.fBypassHandlers == false);
1370#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1371 if (!pVCpu->iem.s.fInPatchCode)
1372 { /* likely */ }
1373 else
1374 {
1375 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1376 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1377 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1378 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1379 if (!pVCpu->iem.s.fInPatchCode)
1380 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1381 }
1382#endif
1383
1384#ifdef DBGFTRACE_ENABLED
1385 switch (enmMode)
1386 {
1387 case IEMMODE_64BIT:
1388 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1389 break;
1390 case IEMMODE_32BIT:
1391 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1392 break;
1393 case IEMMODE_16BIT:
1394 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1395 break;
1396 }
1397#endif
1398}
1399
1400
1401
1402/**
1403 * Prefetches opcodes the first time, when starting execution.
1404 *
1405 * @returns Strict VBox status code.
1406 * @param pVCpu The cross context virtual CPU structure of the
1407 * calling thread.
1408 * @param fBypassHandlers Whether to bypass access handlers.
1409 */
1410IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1411{
1412 iemInitDecoder(pVCpu, fBypassHandlers);
1413
1414#ifdef IEM_WITH_CODE_TLB
1415 /** @todo Do ITLB lookup here. */
1416
1417#else /* !IEM_WITH_CODE_TLB */
1418
1419 /*
1420 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1421 *
1422 * First translate CS:rIP to a physical address.
1423 */
1424 uint32_t cbToTryRead;
1425 RTGCPTR GCPtrPC;
1426 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1427 {
1428 cbToTryRead = PAGE_SIZE;
1429 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1430 if (IEM_IS_CANONICAL(GCPtrPC))
1431 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1432 else
1433 return iemRaiseGeneralProtectionFault0(pVCpu);
1434 }
1435 else
1436 {
1437 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1438 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1439 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1440 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1441 else
1442 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1443 if (cbToTryRead) { /* likely */ }
1444 else /* overflowed */
1445 {
1446 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1447 cbToTryRead = UINT32_MAX;
1448 }
1449 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1450 Assert(GCPtrPC <= UINT32_MAX);
1451 }
1452
1453# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1454 /* Allow interpretation of patch manager code blocks since they can for
1455 instance throw #PFs for perfectly good reasons. */
1456 if (pVCpu->iem.s.fInPatchCode)
1457 {
1458 size_t cbRead = 0;
1459 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1460 AssertRCReturn(rc, rc);
1461 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1462 return VINF_SUCCESS;
1463 }
1464# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1465
1466 RTGCPHYS GCPhys;
1467 uint64_t fFlags;
1468 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1469 if (RT_SUCCESS(rc)) { /* probable */ }
1470 else
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1473 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1474 }
1475 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1476 else
1477 {
1478 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1479 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1480 }
1481 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1482 else
1483 {
1484 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1485 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1486 }
1487 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1488 /** @todo Check reserved bits and such stuff. PGM is better at doing
1489 * that, so do it when implementing the guest virtual address
1490 * TLB... */
1491
1492 /*
1493 * Read the bytes at this address.
1494 */
1495 PVM pVM = pVCpu->CTX_SUFF(pVM);
1496# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1497 size_t cbActual;
1498 if ( PATMIsEnabled(pVM)
1499 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1500 {
1501 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1502 Assert(cbActual > 0);
1503 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1504 }
1505 else
1506# endif
1507 {
1508 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1509 if (cbToTryRead > cbLeftOnPage)
1510 cbToTryRead = cbLeftOnPage;
1511 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1512 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1513
1514 if (!pVCpu->iem.s.fBypassHandlers)
1515 {
1516 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1517 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1518 { /* likely */ }
1519 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1520 {
1521 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1522 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1524 }
1525 else
1526 {
1527 Log((RT_SUCCESS(rcStrict)
1528 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1529 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1530 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1531 return rcStrict;
1532 }
1533 }
1534 else
1535 {
1536 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1537 if (RT_SUCCESS(rc))
1538 { /* likely */ }
1539 else
1540 {
1541 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1542 GCPtrPC, GCPhys, cbToTryRead, rc));
1543 return rc;
1544 }
1545 }
1546 pVCpu->iem.s.cbOpcode = cbToTryRead;
1547 }
1548#endif /* !IEM_WITH_CODE_TLB */
1549 return VINF_SUCCESS;
1550}
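/*
 * A minimal standalone sketch of the segment-limit and linear-address
 * arithmetic used in the 16/32-bit branch above.  The helper names and the
 * plain integer types are made up for illustration; this is not part of the
 * real implementation and is kept out of the build.
 */
#if 0 /* illustrative sketch */
/* How many bytes may be fetched at eip without exceeding the CS limit; 0 means
   eip is already outside the limit (the real code raises a selector-bounds
   fault for that).  eip == 0 with limit == UINT32_MAX wraps the count, which
   the real code treats as "up to UINT32_MAX bytes". */
static uint32_t iemSketchCbWithinCsLimit(uint32_t eip, uint32_t cbCsLimit)
{
    if (eip > cbCsLimit)
        return 0;
    uint32_t cb = cbCsLimit - eip + 1;  /* wraps to 0 only for eip=0, limit=~0U */
    return cb ? cb : UINT32_MAX;
}

/* Outside long mode the linear PC is just the 32-bit truncated base plus eip. */
static uint32_t iemSketchLinearPc(uint64_t uCsBase, uint32_t eip)
{
    return (uint32_t)uCsBase + eip;
}
#endif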
1551
1552
1553/**
1554 * Invalidates the IEM TLBs.
1555 *
1556 * This is called internally as well as by PGM when moving GC mappings.
1557 *
1559 * @param pVCpu The cross context virtual CPU structure of the calling
1560 * thread.
1561 * @param fVmm Set when PGM calls us with a remapping.
1562 */
1563VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1564{
1565#ifdef IEM_WITH_CODE_TLB
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1568 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1569 { /* very likely */ }
1570 else
1571 {
1572 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1573 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1574 while (i-- > 0)
1575 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1576 }
1577#endif
1578
1579#ifdef IEM_WITH_DATA_TLB
1580 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1581 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1582 { /* very likely */ }
1583 else
1584 {
1585 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1586 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1587 while (i-- > 0)
1588 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1589 }
1590#endif
1591 NOREF(pVCpu); NOREF(fVmm);
1592}
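/*
 * The invalidation above relies on a revision counter rather than clearing
 * all 256 entries: the current revision is OR'ed into every tag, so bumping
 * it makes every stored tag stale in O(1) and only the rare wrap-around pays
 * for a full sweep.  A rough sketch of the idea with made-up names and an
 * assumed increment constant (the real one is IEMTLB_REVISION_INCR):
 */
#if 0 /* illustrative sketch */
# define SKETCH_REV_INCR    UINT64_C(0x100000000)  /* assumed to sit above the page-number bits */
static uint64_t g_uSketchTlbRevision = SKETCH_REV_INCR;
static uint64_t g_auSketchTags[256];

static void sketchTlbInvalidateAll(void)
{
    g_uSketchTlbRevision += SKETCH_REV_INCR;
    if (!g_uSketchTlbRevision)          /* wrapped: old tags could match again, so flush for real */
    {
        g_uSketchTlbRevision = SKETCH_REV_INCR;
        for (unsigned i = 0; i < 256; i++)
            g_auSketchTags[i] = 0;
    }
}

static bool sketchTlbHit(uint64_t uPageNo)
{
    uint64_t const uTag = uPageNo | g_uSketchTlbRevision;
    return g_auSketchTags[(uint8_t)uPageNo] == uTag;
}
#endif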
1593
1594
1595/**
1596 * Invalidates a page in the TLBs.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 * @param GCPtr The address of the page to invalidate.
1601 */
1602VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1603{
1604#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1605 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1606 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1607 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1608 uintptr_t idx = (uint8_t)GCPtr;
1609
1610# ifdef IEM_WITH_CODE_TLB
1611 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1612 {
1613 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1614 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1615 pVCpu->iem.s.cbInstrBufTotal = 0;
1616 }
1617# endif
1618
1619# ifdef IEM_WITH_DATA_TLB
1620 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1621 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1622# endif
1623#else
1624 NOREF(pVCpu); NOREF(GCPtr);
1625#endif
1626}
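/*
 * The TLBs are direct mapped with 256 entries, so invalidating one page is
 * an index into the low 8 bits of the page number plus a tag compare.  A
 * small sketch with hypothetical names (the tag layout mirrors the code
 * above: page number OR'ed with the current revision):
 */
#if 0 /* illustrative sketch */
static void sketchTlbInvalidatePage(uint64_t *pauTags, uint64_t uTlbRevision, uint64_t GCPtr)
{
    uint64_t const uPageNo = GCPtr >> 12;       /* X86_PAGE_SHIFT */
    uint8_t  const idx     = (uint8_t)uPageNo;  /* 256 entries -> low 8 bits of the page number */
    if (pauTags[idx] == (uPageNo | uTlbRevision))
        pauTags[idx] = 0;
}
#endif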
1627
1628
1629/**
1630 * Invalidates the host physical aspects of the IEM TLBs.
1631 *
1632 * This is called internally as well as by PGM when moving GC mappings.
1633 *
1634 * @param pVCpu The cross context virtual CPU structure of the calling
1635 * thread.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1638{
1639#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1640 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1641
1642# ifdef IEM_WITH_CODE_TLB
1643 pVCpu->iem.s.cbInstrBufTotal = 0;
1644# endif
1645 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1646 if (uTlbPhysRev != 0)
1647 {
1648 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1649 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1650 }
1651 else
1652 {
1653 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1654 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1655
1656 unsigned i;
1657# ifdef IEM_WITH_CODE_TLB
1658 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1659 while (i-- > 0)
1660 {
1661 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1662 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1663 }
1664# endif
1665# ifdef IEM_WITH_DATA_TLB
1666 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1667 while (i-- > 0)
1668 {
1669 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1670 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1671 }
1672# endif
1673 }
1674#else
1675 NOREF(pVCpu);
1676#endif
1677}
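/*
 * The physical side uses the same trick: uTlbPhysRev lives in the high bits
 * of fFlagsAndPhysRev, and an entry's cached mapping and read/write info is
 * only trusted while those bits equal the TLB's current physical revision.
 * A sketch of that validity test; the mask is passed in here because its
 * value is not restated in this sketch (in the real code it is
 * IEMTLBE_F_PHYS_REV):
 */
#if 0 /* illustrative sketch */
static bool sketchPhysInfoStillValid(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev, uint64_t fPhysRevMask)
{
    return (fFlagsAndPhysRev & fPhysRevMask) == uTlbPhysRev;
}
#endif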
1678
1679
1680/**
1681 * Invalidates the host physical aspects of the IEM TLBs.
1682 *
1683 * This is called internally as well as by PGM when moving GC mappings.
1684 *
1685 * @param pVM The cross context VM structure.
1686 *
1687 * @remarks Caller holds the PGM lock.
1688 */
1689VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1690{
1691 RT_NOREF_PV(pVM);
1692}
1693
1694#ifdef IEM_WITH_CODE_TLB
1695
1696/**
1697 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1698 * failure and jumps.
1699 *
1700 * We end up here for a number of reasons:
1701 * - pbInstrBuf isn't yet initialized.
1702 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1703 * - Advancing beyond the CS segment limit.
1704 * - Fetching from non-mappable page (e.g. MMIO).
1705 *
1706 * @param pVCpu The cross context virtual CPU structure of the
1707 * calling thread.
1708 * @param pvDst Where to return the bytes.
1709 * @param cbDst Number of bytes to read.
1710 *
1711 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1712 */
1713IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1714{
1715#ifdef IN_RING3
1716 for (;;)
1717 {
1718 Assert(cbDst <= 8);
1719 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1720
1721 /*
1722 * We might have a partial buffer match, deal with that first to make the
1723 * rest simpler. This is the first part of the cross page/buffer case.
1724 */
1725 if (pVCpu->iem.s.pbInstrBuf != NULL)
1726 {
1727 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1728 {
1729 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1730 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1731 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1732
1733 cbDst -= cbCopy;
1734 pvDst = (uint8_t *)pvDst + cbCopy;
1735 offBuf += cbCopy;
1736 pVCpu->iem.s.offInstrNextByte = offBuf;
1737 }
1738 }
1739
1740 /*
1741 * Check segment limit, figuring how much we're allowed to access at this point.
1742 *
1743 * We will fault immediately if RIP is past the segment limit / in non-canonical
1744 * territory. If we do continue, there are one or more bytes to read before we
1745 * end up in trouble and we need to do that first before faulting.
1746 */
1747 RTGCPTR GCPtrFirst;
1748 uint32_t cbMaxRead;
1749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1750 {
1751 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1752 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1753 { /* likely */ }
1754 else
1755 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1756 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1757 }
1758 else
1759 {
1760 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1761 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1762 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1763 { /* likely */ }
1764 else
1765 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1766 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1767 if (cbMaxRead != 0)
1768 { /* likely */ }
1769 else
1770 {
1771 /* Overflowed because address is 0 and limit is max. */
1772 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1773 cbMaxRead = X86_PAGE_SIZE;
1774 }
1775 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1776 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1777 if (cbMaxRead2 < cbMaxRead)
1778 cbMaxRead = cbMaxRead2;
1779 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1780 }
1781
1782 /*
1783 * Get the TLB entry for this piece of code.
1784 */
1785 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1786 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1787 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1788 if (pTlbe->uTag == uTag)
1789 {
1790 /* likely when executing lots of code, otherwise unlikely */
1791# ifdef VBOX_WITH_STATISTICS
1792 pVCpu->iem.s.CodeTlb.cTlbHits++;
1793# endif
1794 }
1795 else
1796 {
1797 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1798# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1799 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1800 {
1801 pTlbe->uTag = uTag;
1802 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1803 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1804 pTlbe->GCPhys = NIL_RTGCPHYS;
1805 pTlbe->pbMappingR3 = NULL;
1806 }
1807 else
1808# endif
1809 {
1810 RTGCPHYS GCPhys;
1811 uint64_t fFlags;
1812 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1813 if (RT_FAILURE(rc))
1814 {
1815 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1816 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1817 }
1818
1819 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1820 pTlbe->uTag = uTag;
1821 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1822 pTlbe->GCPhys = GCPhys;
1823 pTlbe->pbMappingR3 = NULL;
1824 }
1825 }
1826
1827 /*
1828 * Check TLB page table level access flags.
1829 */
1830 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1831 {
1832 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1833 {
1834 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1835 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1836 }
1837 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1838 {
1839 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1840 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1841 }
1842 }
1843
1844# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1845 /*
1846 * Allow interpretation of patch manager code blocks since they can for
1847 * instance throw #PFs for perfectly good reasons.
1848 */
1849 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1850 { /* likely */ }
1851 else
1852 {
1853 /** @todo This could be optimized a little in ring-3 if we liked. */
1854 size_t cbRead = 0;
1855 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1856 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1857 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1858 return;
1859 }
1860# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1861
1862 /*
1863 * Look up the physical page info if necessary.
1864 */
1865 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1866 { /* not necessary */ }
1867 else
1868 {
1869 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1870 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1871 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1872 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1873 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1874 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1875 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1876 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1877 }
1878
1879# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1880 /*
1881 * Try do a direct read using the pbMappingR3 pointer.
1882 */
1883 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1884 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1885 {
1886 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1887 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1888 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1889 {
1890 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1891 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1892 }
1893 else
1894 {
1895 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1896 Assert(cbInstr < cbMaxRead);
1897 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1898 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1899 }
1900 if (cbDst <= cbMaxRead)
1901 {
1902 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1903 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1904 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1905 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1906 return;
1907 }
1908 pVCpu->iem.s.pbInstrBuf = NULL;
1909
1910 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1911 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1912 }
1913 else
1914# endif
1915#if 0
1916 /*
1917 * If there is no special read handling, we can read a bit more and
1918 * put it in the prefetch buffer.
1919 */
1920 if ( cbDst < cbMaxRead
1921 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1922 {
1923 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1924 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1925 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1926 { /* likely */ }
1927 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1928 {
1929 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1930 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1931 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1932 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1933 }
1934 else
1935 {
1936 Log((RT_SUCCESS(rcStrict)
1937 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1938 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1939 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1940 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1941 }
1942 }
1943 /*
1944 * Special read handling, so only read exactly what's needed.
1945 * This is a highly unlikely scenario.
1946 */
1947 else
1948#endif
1949 {
1950 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1951 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1952 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1953 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1954 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1955 { /* likely */ }
1956 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1957 {
1958 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1959 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1960 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1961 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1962 }
1963 else
1964 {
1965 Log((RT_SUCCESS(rcStrict)
1966 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1967 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1968 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1969 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1970 }
1971 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1972 if (cbToRead == cbDst)
1973 return;
1974 }
1975
1976 /*
1977 * More to read, loop.
1978 */
1979 cbDst -= cbMaxRead;
1980 pvDst = (uint8_t *)pvDst + cbMaxRead;
1981 }
1982#else
1983 RT_NOREF(pvDst, cbDst);
1984 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1985#endif
1986}
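/*
 * The loop above always clamps a single read to (a) the end of the current
 * 4K page, (b) what the segment limit still allows, and (c) what the caller
 * still needs, then goes around again for cross-page fetches.  A compressed
 * sketch of that clamping with made-up names, assuming 4K pages:
 */
#if 0 /* illustrative sketch */
static uint32_t sketchCbThisRead(uint64_t GCPtrFirst, uint32_t cbMaxFromSegment, size_t cbStillNeeded)
{
    uint32_t cbMax = 0x1000 - ((uint32_t)GCPtrFirst & 0xfff);   /* to the end of the page */
    if (cbMax > cbMaxFromSegment)
        cbMax = cbMaxFromSegment;                               /* segment limit allowance */
    return (size_t)cbMax < cbStillNeeded ? cbMax : (uint32_t)cbStillNeeded;
}
#endif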
1987
1988#else
1989
1990/**
1991 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1992 * exception if it fails.
1993 *
1994 * @returns Strict VBox status code.
1995 * @param pVCpu The cross context virtual CPU structure of the
1996 * calling thread.
1997 * @param cbMin The minimum number of bytes relative to offOpcode
1998 * that must be read.
1999 */
2000IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2001{
2002 /*
2003 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2004 *
2005 * First translate CS:rIP to a physical address.
2006 */
2007 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2008 uint32_t cbToTryRead;
2009 RTGCPTR GCPtrNext;
2010 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2011 {
2012 cbToTryRead = PAGE_SIZE;
2013 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2014 if (!IEM_IS_CANONICAL(GCPtrNext))
2015 return iemRaiseGeneralProtectionFault0(pVCpu);
2016 }
2017 else
2018 {
2019 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2020 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2021 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2022 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2023 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2024 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2025 if (!cbToTryRead) /* overflowed */
2026 {
2027 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2028 cbToTryRead = UINT32_MAX;
2029 /** @todo check out wrapping around the code segment. */
2030 }
2031 if (cbToTryRead < cbMin - cbLeft)
2032 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2033 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2034 }
2035
2036 /* Only read up to the end of the page, and make sure we don't read more
2037 than the opcode buffer can hold. */
2038 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2039 if (cbToTryRead > cbLeftOnPage)
2040 cbToTryRead = cbLeftOnPage;
2041 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2042 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2043/** @todo r=bird: Convert assertion into undefined opcode exception? */
2044 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2045
2046# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2047 /* Allow interpretation of patch manager code blocks since they can for
2048 instance throw #PFs for perfectly good reasons. */
2049 if (pVCpu->iem.s.fInPatchCode)
2050 {
2051 size_t cbRead = 0;
2052 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2053 AssertRCReturn(rc, rc);
2054 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2055 return VINF_SUCCESS;
2056 }
2057# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2058
2059 RTGCPHYS GCPhys;
2060 uint64_t fFlags;
2061 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2062 if (RT_FAILURE(rc))
2063 {
2064 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2065 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2066 }
2067 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2068 {
2069 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2070 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2071 }
2072 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2073 {
2074 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2075 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2076 }
2077 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2078 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2079 /** @todo Check reserved bits and such stuff. PGM is better at doing
2080 * that, so do it when implementing the guest virtual address
2081 * TLB... */
2082
2083 /*
2084 * Read the bytes at this address.
2085 *
2086 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2087 * and since PATM should only patch the start of an instruction there
2088 * should be no need to check again here.
2089 */
2090 if (!pVCpu->iem.s.fBypassHandlers)
2091 {
2092 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2093 cbToTryRead, PGMACCESSORIGIN_IEM);
2094 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2095 { /* likely */ }
2096 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2097 {
2098 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2099 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2100 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2101 }
2102 else
2103 {
2104 Log((RT_SUCCESS(rcStrict)
2105 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2106 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2107 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2108 return rcStrict;
2109 }
2110 }
2111 else
2112 {
2113 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2114 if (RT_SUCCESS(rc))
2115 { /* likely */ }
2116 else
2117 {
2118 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2119 return rc;
2120 }
2121 }
2122 pVCpu->iem.s.cbOpcode += cbToTryRead;
2123 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2124
2125 return VINF_SUCCESS;
2126}
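/*
 * In the non-TLB build the decoder state boils down to abOpcode[0..cbOpcode)
 * holding the bytes prefetched so far and offOpcode pointing at the next byte
 * to consume; the function above only ever grows cbOpcode.  A tiny sketch of
 * the "do we already have enough bytes?" test the getters below rely on
 * (hypothetical helper, not part of the real code):
 */
#if 0 /* illustrative sketch */
static bool sketchHaveOpcodeBytes(uint8_t offOpcode, uint8_t cbOpcode, uint8_t cbNeeded)
{
    return (unsigned)offOpcode + cbNeeded <= cbOpcode;
}
#endif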
2127
2128#endif /* !IEM_WITH_CODE_TLB */
2129#ifndef IEM_WITH_SETJMP
2130
2131/**
2132 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2133 *
2134 * @returns Strict VBox status code.
2135 * @param pVCpu The cross context virtual CPU structure of the
2136 * calling thread.
2137 * @param pb Where to return the opcode byte.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2140{
2141 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2142 if (rcStrict == VINF_SUCCESS)
2143 {
2144 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2145 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2146 pVCpu->iem.s.offOpcode = offOpcode + 1;
2147 }
2148 else
2149 *pb = 0;
2150 return rcStrict;
2151}
2152
2153
2154/**
2155 * Fetches the next opcode byte.
2156 *
2157 * @returns Strict VBox status code.
2158 * @param pVCpu The cross context virtual CPU structure of the
2159 * calling thread.
2160 * @param pu8 Where to return the opcode byte.
2161 */
2162DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2163{
2164 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2165 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2166 {
2167 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2168 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2169 return VINF_SUCCESS;
2170 }
2171 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2172}
2173
2174#else /* IEM_WITH_SETJMP */
2175
2176/**
2177 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2178 *
2179 * @returns The opcode byte.
2180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2181 */
2182DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2183{
2184# ifdef IEM_WITH_CODE_TLB
2185 uint8_t u8;
2186 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2187 return u8;
2188# else
2189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2190 if (rcStrict == VINF_SUCCESS)
2191 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2192 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2193# endif
2194}
2195
2196
2197/**
2198 * Fetches the next opcode byte, longjmp on error.
2199 *
2200 * @returns The opcode byte.
2201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2202 */
2203DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2204{
2205# ifdef IEM_WITH_CODE_TLB
2206 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2207 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2208 if (RT_LIKELY( pbBuf != NULL
2209 && offBuf < pVCpu->iem.s.cbInstrBuf))
2210 {
2211 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2212 return pbBuf[offBuf];
2213 }
2214# else
2215 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2216 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2217 {
2218 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2219 return pVCpu->iem.s.abOpcode[offOpcode];
2220 }
2221# endif
2222 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2223}
2224
2225#endif /* IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next opcode byte, returns automatically on failure.
2229 *
2230 * @param a_pu8 Where to return the opcode byte.
2231 * @remark Implicitly references pVCpu.
2232 */
2233#ifndef IEM_WITH_SETJMP
2234# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2235 do \
2236 { \
2237 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2238 if (rcStrict2 == VINF_SUCCESS) \
2239 { /* likely */ } \
2240 else \
2241 return rcStrict2; \
2242 } while (0)
2243#else
2244# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2245#endif /* IEM_WITH_SETJMP */
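/*
 * The IEM_OPCODE_GET_NEXT_* macros hide the two build flavours from the
 * instruction decoders: the status-code build returns rcStrict2 on failure,
 * while the setjmp build longjmps from deep inside the fetcher.  A
 * hypothetical decoder stub showing the intended usage (iemSketchDecodeTwoBytes
 * is not a real IEM function; in the non-setjmp build the enclosing function
 * must return VBOXSTRICTRC for the hidden 'return' to compile):
 */
#if 0 /* illustrative sketch */
IEM_STATIC VBOXSTRICTRC iemSketchDecodeTwoBytes(PVMCPU pVCpu)
{
    uint8_t b1, b2;
    IEM_OPCODE_GET_NEXT_U8(&b1);    /* may return a strict status or longjmp */
    IEM_OPCODE_GET_NEXT_U8(&b2);
    Log4(("sketch: opcode bytes %#x %#x\n", b1, b2));
    return VINF_SUCCESS;
}
#endif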
2246
2247
2248#ifndef IEM_WITH_SETJMP
2249/**
2250 * Fetches the next signed byte from the opcode stream.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pi8 Where to return the signed byte.
2255 */
2256DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2257{
2258 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2259}
2260#endif /* !IEM_WITH_SETJMP */
2261
2262
2263/**
2264 * Fetches the next signed byte from the opcode stream, returning automatically
2265 * on failure.
2266 *
2267 * @param a_pi8 Where to return the signed byte.
2268 * @remark Implicitly references pVCpu.
2269 */
2270#ifndef IEM_WITH_SETJMP
2271# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2272 do \
2273 { \
2274 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2275 if (rcStrict2 != VINF_SUCCESS) \
2276 return rcStrict2; \
2277 } while (0)
2278#else /* IEM_WITH_SETJMP */
2279# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2280
2281#endif /* IEM_WITH_SETJMP */
2282
2283#ifndef IEM_WITH_SETJMP
2284
2285/**
2286 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2287 *
2288 * @returns Strict VBox status code.
2289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2290 * @param pu16 Where to return the opcode word.
2291 */
2292DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2293{
2294 uint8_t u8;
2295 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2296 if (rcStrict == VINF_SUCCESS)
2297 *pu16 = (int8_t)u8;
2298 return rcStrict;
2299}
2300
2301
2302/**
2303 * Fetches the next signed byte from the opcode stream, extending it to
2304 * unsigned 16-bit.
2305 *
2306 * @returns Strict VBox status code.
2307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2308 * @param pu16 Where to return the unsigned word.
2309 */
2310DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2311{
2312 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2313 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2314 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2315
2316 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2317 pVCpu->iem.s.offOpcode = offOpcode + 1;
2318 return VINF_SUCCESS;
2319}
2320
2321#endif /* !IEM_WITH_SETJMP */
2322
2323/**
2324 * Fetches the next signed byte from the opcode stream, sign-extending it to
2325 * a word and returning automatically on failure.
2326 *
2327 * @param a_pu16 Where to return the word.
2328 * @remark Implicitly references pVCpu.
2329 */
2330#ifndef IEM_WITH_SETJMP
2331# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2332 do \
2333 { \
2334 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2335 if (rcStrict2 != VINF_SUCCESS) \
2336 return rcStrict2; \
2337 } while (0)
2338#else
2339# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2340#endif
2341
2342#ifndef IEM_WITH_SETJMP
2343
2344/**
2345 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2346 *
2347 * @returns Strict VBox status code.
2348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2349 * @param pu32 Where to return the opcode dword.
2350 */
2351DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2352{
2353 uint8_t u8;
2354 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2355 if (rcStrict == VINF_SUCCESS)
2356 *pu32 = (int8_t)u8;
2357 return rcStrict;
2358}
2359
2360
2361/**
2362 * Fetches the next signed byte from the opcode stream, extending it to
2363 * unsigned 32-bit.
2364 *
2365 * @returns Strict VBox status code.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 * @param pu32 Where to return the unsigned dword.
2368 */
2369DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2370{
2371 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2372 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2373 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2374
2375 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2376 pVCpu->iem.s.offOpcode = offOpcode + 1;
2377 return VINF_SUCCESS;
2378}
2379
2380#endif /* !IEM_WITH_SETJMP */
2381
2382/**
2383 * Fetches the next signed byte from the opcode stream, sign-extending it to
2384 * a double word and returning automatically on failure.
2385 *
2386 * @param a_pu32 Where to return the double word.
2387 * @remark Implicitly references pVCpu.
2388 */
2389#ifndef IEM_WITH_SETJMP
2390# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2391 do \
2392 { \
2393 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2394 if (rcStrict2 != VINF_SUCCESS) \
2395 return rcStrict2; \
2396 } while (0)
2397#else
2398# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2399#endif
2400
2401#ifndef IEM_WITH_SETJMP
2402
2403/**
2404 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2405 *
2406 * @returns Strict VBox status code.
2407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2408 * @param pu64 Where to return the opcode qword.
2409 */
2410DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2411{
2412 uint8_t u8;
2413 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2414 if (rcStrict == VINF_SUCCESS)
2415 *pu64 = (int8_t)u8;
2416 return rcStrict;
2417}
2418
2419
2420/**
2421 * Fetches the next signed byte from the opcode stream, extending it to
2422 * unsigned 64-bit.
2423 *
2424 * @returns Strict VBox status code.
2425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2426 * @param pu64 Where to return the unsigned qword.
2427 */
2428DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2429{
2430 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2431 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2432 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2433
2434 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2435 pVCpu->iem.s.offOpcode = offOpcode + 1;
2436 return VINF_SUCCESS;
2437}
2438
2439#endif /* !IEM_WITH_SETJMP */
2440
2441
2442/**
2443 * Fetches the next signed byte from the opcode stream, sign-extending it to
2444 * a quad word and returning automatically on failure.
2445 *
2446 * @param a_pu64 Where to return the quad word.
2447 * @remark Implicitly references pVCpu.
2448 */
2449#ifndef IEM_WITH_SETJMP
2450# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2451 do \
2452 { \
2453 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2454 if (rcStrict2 != VINF_SUCCESS) \
2455 return rcStrict2; \
2456 } while (0)
2457#else
2458# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2459#endif
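/*
 * All three S8 sign-extension fetchers rely on the same C conversion chain:
 * the byte is reinterpreted as int8_t and then widened, which replicates the
 * sign bit into the upper bits of the unsigned destination.  Worked example
 * (hypothetical helper name):
 *
 *    (int8_t)0x80           == -128
 *    (uint16_t)(int8_t)0x80 == 0xFF80
 *    (uint32_t)(int8_t)0x80 == 0xFFFFFF80
 *    (uint64_t)(int8_t)0x80 == 0xFFFFFFFFFFFFFF80
 */
#if 0 /* illustrative sketch */
static uint64_t sketchSignExtendU8ToU64(uint8_t b)
{
    return (uint64_t)(int64_t)(int8_t)b;
}
#endif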
2460
2461
2462#ifndef IEM_WITH_SETJMP
2463/**
2464 * Fetches the next opcode byte.
2465 *
2466 * @returns Strict VBox status code.
2467 * @param pVCpu The cross context virtual CPU structure of the
2468 * calling thread.
2469 * @param pu8 Where to return the opcode byte.
2470 */
2471DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2472{
2473 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2474 pVCpu->iem.s.offModRm = offOpcode;
2475 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2476 {
2477 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2478 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2479 return VINF_SUCCESS;
2480 }
2481 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2482}
2483#else /* IEM_WITH_SETJMP */
2484/**
2485 * Fetches the next opcode byte, which is a ModR/M byte, noting down its position; longjmp on error.
2486 *
2487 * @returns The opcode byte.
2488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2489 */
2490DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2491{
2492# ifdef IEM_WITH_CODE_TLB
2493 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2494 pVCpu->iem.s.offModRm = offBuf;
2495 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2496 if (RT_LIKELY( pbBuf != NULL
2497 && offBuf < pVCpu->iem.s.cbInstrBuf))
2498 {
2499 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2500 return pbBuf[offBuf];
2501 }
2502# else
2503 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2504 pVCpu->iem.s.offModRm = offOpcode;
2505 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2506 {
2507 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2508 return pVCpu->iem.s.abOpcode[offOpcode];
2509 }
2510# endif
2511 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2512}
2513#endif /* IEM_WITH_SETJMP */
2514
2515/**
2516 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2517 * on failure.
2518 *
2519 * Will note down the position of the ModR/M byte for VT-x exits.
2520 *
2521 * @param a_pbRm Where to return the RM opcode byte.
2522 * @remark Implicitly references pVCpu.
2523 */
2524#ifndef IEM_WITH_SETJMP
2525# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2526 do \
2527 { \
2528 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2529 if (rcStrict2 == VINF_SUCCESS) \
2530 { /* likely */ } \
2531 else \
2532 return rcStrict2; \
2533 } while (0)
2534#else
2535# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2536#endif /* IEM_WITH_SETJMP */
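/*
 * offModRm records where the ModR/M byte sits in the instruction stream so
 * that VT-x exit handlers can re-derive the operands later.  The byte itself
 * packs mod(2)/reg(3)/rm(3); a sketch of the standard field extraction with
 * made-up names:
 */
#if 0 /* illustrative sketch */
static void sketchSplitModRm(uint8_t bRm, uint8_t *pMod, uint8_t *pReg, uint8_t *pRm)
{
    *pMod = bRm >> 6;           /* 0/1/2 = memory operand (no / byte / word-or-dword displacement), 3 = register */
    *pReg = (bRm >> 3) & 7;     /* register operand or opcode extension */
    *pRm  = bRm & 7;            /* r/m operand */
}
#endif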
2537
2538
2539#ifndef IEM_WITH_SETJMP
2540
2541/**
2542 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2543 *
2544 * @returns Strict VBox status code.
2545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2546 * @param pu16 Where to return the opcode word.
2547 */
2548DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2549{
2550 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2551 if (rcStrict == VINF_SUCCESS)
2552 {
2553 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2554# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2555 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2556# else
2557 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2558# endif
2559 pVCpu->iem.s.offOpcode = offOpcode + 2;
2560 }
2561 else
2562 *pu16 = 0;
2563 return rcStrict;
2564}
2565
2566
2567/**
2568 * Fetches the next opcode word.
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2572 * @param pu16 Where to return the opcode word.
2573 */
2574DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2575{
2576 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2577 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2578 {
2579 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2580# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2581 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2582# else
2583 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584# endif
2585 return VINF_SUCCESS;
2586 }
2587 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2588}
2589
2590#else /* IEM_WITH_SETJMP */
2591
2592/**
2593 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2594 *
2595 * @returns The opcode word.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 */
2598DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2599{
2600# ifdef IEM_WITH_CODE_TLB
2601 uint16_t u16;
2602 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2603 return u16;
2604# else
2605 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2606 if (rcStrict == VINF_SUCCESS)
2607 {
2608 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2609 pVCpu->iem.s.offOpcode += 2;
2610# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2611 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2612# else
2613 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2614# endif
2615 }
2616 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2617# endif
2618}
2619
2620
2621/**
2622 * Fetches the next opcode word, longjmp on error.
2623 *
2624 * @returns The opcode word.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 */
2627DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2628{
2629# ifdef IEM_WITH_CODE_TLB
2630 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2631 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2632 if (RT_LIKELY( pbBuf != NULL
2633 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2634 {
2635 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2636# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2637 return *(uint16_t const *)&pbBuf[offBuf];
2638# else
2639 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2640# endif
2641 }
2642# else
2643 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2644 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2645 {
2646 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2647# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2648 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2649# else
2650 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2651# endif
2652 }
2653# endif
2654 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2655}
2656
2657#endif /* IEM_WITH_SETJMP */
2658
2659
2660/**
2661 * Fetches the next opcode word, returns automatically on failure.
2662 *
2663 * @param a_pu16 Where to return the opcode word.
2664 * @remark Implicitly references pVCpu.
2665 */
2666#ifndef IEM_WITH_SETJMP
2667# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2668 do \
2669 { \
2670 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2671 if (rcStrict2 != VINF_SUCCESS) \
2672 return rcStrict2; \
2673 } while (0)
2674#else
2675# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2676#endif
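/*
 * Both branches of the U16 fetchers above yield the same little-endian
 * result: the unaligned-access path reads the two bytes directly, while
 * RT_MAKE_U16(lo, hi), as used above, assembles lo | (hi << 8).  Equivalent
 * plain-C sketch (hypothetical helper name):
 */
#if 0 /* illustrative sketch */
static uint16_t sketchReadU16LittleEndian(uint8_t const *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));
}
#endif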
2677
2678#ifndef IEM_WITH_SETJMP
2679
2680/**
2681 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param pu32 Where to return the opcode double word.
2686 */
2687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2688{
2689 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2690 if (rcStrict == VINF_SUCCESS)
2691 {
2692 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2693 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2694 pVCpu->iem.s.offOpcode = offOpcode + 2;
2695 }
2696 else
2697 *pu32 = 0;
2698 return rcStrict;
2699}
2700
2701
2702/**
2703 * Fetches the next opcode word, zero extending it to a double word.
2704 *
2705 * @returns Strict VBox status code.
2706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2707 * @param pu32 Where to return the opcode double word.
2708 */
2709DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2710{
2711 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2712 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2713 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2714
2715 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2716 pVCpu->iem.s.offOpcode = offOpcode + 2;
2717 return VINF_SUCCESS;
2718}
2719
2720#endif /* !IEM_WITH_SETJMP */
2721
2722
2723/**
2724 * Fetches the next opcode word and zero extends it to a double word, returns
2725 * automatically on failure.
2726 *
2727 * @param a_pu32 Where to return the opcode double word.
2728 * @remark Implicitly references pVCpu.
2729 */
2730#ifndef IEM_WITH_SETJMP
2731# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2732 do \
2733 { \
2734 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2735 if (rcStrict2 != VINF_SUCCESS) \
2736 return rcStrict2; \
2737 } while (0)
2738#else
2739# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2740#endif
2741
2742#ifndef IEM_WITH_SETJMP
2743
2744/**
2745 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu64 Where to return the opcode quad word.
2750 */
2751DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2752{
2753 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2754 if (rcStrict == VINF_SUCCESS)
2755 {
2756 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2757 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2758 pVCpu->iem.s.offOpcode = offOpcode + 2;
2759 }
2760 else
2761 *pu64 = 0;
2762 return rcStrict;
2763}
2764
2765
2766/**
2767 * Fetches the next opcode word, zero extending it to a quad word.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param pu64 Where to return the opcode quad word.
2772 */
2773DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2776 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2777 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2778
2779 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2780 pVCpu->iem.s.offOpcode = offOpcode + 2;
2781 return VINF_SUCCESS;
2782}
2783
2784#endif /* !IEM_WITH_SETJMP */
2785
2786/**
2787 * Fetches the next opcode word and zero extends it to a quad word, returns
2788 * automatically on failure.
2789 *
2790 * @param a_pu64 Where to return the opcode quad word.
2791 * @remark Implicitly references pVCpu.
2792 */
2793#ifndef IEM_WITH_SETJMP
2794# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2795 do \
2796 { \
2797 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2798 if (rcStrict2 != VINF_SUCCESS) \
2799 return rcStrict2; \
2800 } while (0)
2801#else
2802# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2803#endif
2804
2805
2806#ifndef IEM_WITH_SETJMP
2807/**
2808 * Fetches the next signed word from the opcode stream.
2809 *
2810 * @returns Strict VBox status code.
2811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2812 * @param pi16 Where to return the signed word.
2813 */
2814DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2815{
2816 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2817}
2818#endif /* !IEM_WITH_SETJMP */
2819
2820
2821/**
2822 * Fetches the next signed word from the opcode stream, returning automatically
2823 * on failure.
2824 *
2825 * @param a_pi16 Where to return the signed word.
2826 * @remark Implicitly references pVCpu.
2827 */
2828#ifndef IEM_WITH_SETJMP
2829# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2830 do \
2831 { \
2832 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2833 if (rcStrict2 != VINF_SUCCESS) \
2834 return rcStrict2; \
2835 } while (0)
2836#else
2837# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2838#endif
2839
2840#ifndef IEM_WITH_SETJMP
2841
2842/**
2843 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2844 *
2845 * @returns Strict VBox status code.
2846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2847 * @param pu32 Where to return the opcode dword.
2848 */
2849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2850{
2851 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2852 if (rcStrict == VINF_SUCCESS)
2853 {
2854 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2855# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2856 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2857# else
2858 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2859 pVCpu->iem.s.abOpcode[offOpcode + 1],
2860 pVCpu->iem.s.abOpcode[offOpcode + 2],
2861 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2862# endif
2863 pVCpu->iem.s.offOpcode = offOpcode + 4;
2864 }
2865 else
2866 *pu32 = 0;
2867 return rcStrict;
2868}
2869
2870
2871/**
2872 * Fetches the next opcode dword.
2873 *
2874 * @returns Strict VBox status code.
2875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2876 * @param pu32 Where to return the opcode double word.
2877 */
2878DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2879{
2880 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2881 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2882 {
2883 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2884# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2885 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2886# else
2887 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2888 pVCpu->iem.s.abOpcode[offOpcode + 1],
2889 pVCpu->iem.s.abOpcode[offOpcode + 2],
2890 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2891# endif
2892 return VINF_SUCCESS;
2893 }
2894 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2895}
2896
2897#else /* IEM_WITH_SETJMP */
2898
2899/**
2900 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2901 *
2902 * @returns The opcode dword.
2903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2904 */
2905DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2906{
2907# ifdef IEM_WITH_CODE_TLB
2908 uint32_t u32;
2909 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2910 return u32;
2911# else
2912 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2913 if (rcStrict == VINF_SUCCESS)
2914 {
2915 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2916 pVCpu->iem.s.offOpcode = offOpcode + 4;
2917# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2918 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2919# else
2920 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2921 pVCpu->iem.s.abOpcode[offOpcode + 1],
2922 pVCpu->iem.s.abOpcode[offOpcode + 2],
2923 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2924# endif
2925 }
2926 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2927# endif
2928}
2929
2930
2931/**
2932 * Fetches the next opcode dword, longjmp on error.
2933 *
2934 * @returns The opcode dword.
2935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2936 */
2937DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2938{
2939# ifdef IEM_WITH_CODE_TLB
2940 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2941 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2942 if (RT_LIKELY( pbBuf != NULL
2943 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2944 {
2945 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2946# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2947 return *(uint32_t const *)&pbBuf[offBuf];
2948# else
2949 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2950 pbBuf[offBuf + 1],
2951 pbBuf[offBuf + 2],
2952 pbBuf[offBuf + 3]);
2953# endif
2954 }
2955# else
2956 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2957 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2958 {
2959 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2960# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2961 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2962# else
2963 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2964 pVCpu->iem.s.abOpcode[offOpcode + 1],
2965 pVCpu->iem.s.abOpcode[offOpcode + 2],
2966 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2967# endif
2968 }
2969# endif
2970 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2971}
2972
2973#endif /* !IEM_WITH_SETJMP */
2974
2975
2976/**
2977 * Fetches the next opcode dword, returns automatically on failure.
2978 *
2979 * @param a_pu32 Where to return the opcode dword.
2980 * @remark Implicitly references pVCpu.
2981 */
2982#ifndef IEM_WITH_SETJMP
2983# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2984 do \
2985 { \
2986 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2987 if (rcStrict2 != VINF_SUCCESS) \
2988 return rcStrict2; \
2989 } while (0)
2990#else
2991# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2992#endif
2993
2994#ifndef IEM_WITH_SETJMP
2995
2996/**
2997 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2998 *
2999 * @returns Strict VBox status code.
3000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3001 * @param pu64 Where to return the opcode dword.
3002 */
3003DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3004{
3005 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3006 if (rcStrict == VINF_SUCCESS)
3007 {
3008 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3009 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3010 pVCpu->iem.s.abOpcode[offOpcode + 1],
3011 pVCpu->iem.s.abOpcode[offOpcode + 2],
3012 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3013 pVCpu->iem.s.offOpcode = offOpcode + 4;
3014 }
3015 else
3016 *pu64 = 0;
3017 return rcStrict;
3018}
3019
3020
3021/**
3022 * Fetches the next opcode dword, zero extending it to a quad word.
3023 *
3024 * @returns Strict VBox status code.
3025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3026 * @param pu64 Where to return the opcode quad word.
3027 */
3028DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3029{
3030 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3031 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3032 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3033
3034 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3035 pVCpu->iem.s.abOpcode[offOpcode + 1],
3036 pVCpu->iem.s.abOpcode[offOpcode + 2],
3037 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3038 pVCpu->iem.s.offOpcode = offOpcode + 4;
3039 return VINF_SUCCESS;
3040}
3041
3042#endif /* !IEM_WITH_SETJMP */
3043
3044
3045/**
3046 * Fetches the next opcode dword and zero extends it to a quad word, returns
3047 * automatically on failure.
3048 *
3049 * @param a_pu64 Where to return the opcode quad word.
3050 * @remark Implicitly references pVCpu.
3051 */
3052#ifndef IEM_WITH_SETJMP
3053# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3054 do \
3055 { \
3056 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3057 if (rcStrict2 != VINF_SUCCESS) \
3058 return rcStrict2; \
3059 } while (0)
3060#else
3061# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3062#endif
3063
3064
3065#ifndef IEM_WITH_SETJMP
3066/**
3067 * Fetches the next signed double word from the opcode stream.
3068 *
3069 * @returns Strict VBox status code.
3070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3071 * @param pi32 Where to return the signed double word.
3072 */
3073DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3074{
3075 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3076}
3077#endif
3078
3079/**
3080 * Fetches the next signed double word from the opcode stream, returning
3081 * automatically on failure.
3082 *
3083 * @param a_pi32 Where to return the signed double word.
3084 * @remark Implicitly references pVCpu.
3085 */
3086#ifndef IEM_WITH_SETJMP
3087# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3088 do \
3089 { \
3090 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3091 if (rcStrict2 != VINF_SUCCESS) \
3092 return rcStrict2; \
3093 } while (0)
3094#else
3095# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3096#endif
3097
3098#ifndef IEM_WITH_SETJMP
3099
3100/**
3101 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3102 *
3103 * @returns Strict VBox status code.
3104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3105 * @param pu64 Where to return the opcode qword.
3106 */
3107DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3108{
3109 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3110 if (rcStrict == VINF_SUCCESS)
3111 {
3112 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3113 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3114 pVCpu->iem.s.abOpcode[offOpcode + 1],
3115 pVCpu->iem.s.abOpcode[offOpcode + 2],
3116 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3117 pVCpu->iem.s.offOpcode = offOpcode + 4;
3118 }
3119 else
3120 *pu64 = 0;
3121 return rcStrict;
3122}
3123
3124
3125/**
3126 * Fetches the next opcode dword, sign extending it into a quad word.
3127 *
3128 * @returns Strict VBox status code.
3129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3130 * @param pu64 Where to return the opcode quad word.
3131 */
3132DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3133{
3134 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3135 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3136 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3137
3138 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3139 pVCpu->iem.s.abOpcode[offOpcode + 1],
3140 pVCpu->iem.s.abOpcode[offOpcode + 2],
3141 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3142 *pu64 = i32;
3143 pVCpu->iem.s.offOpcode = offOpcode + 4;
3144 return VINF_SUCCESS;
3145}
3146
3147#endif /* !IEM_WITH_SETJMP */
3148
3149
3150/**
3151 * Fetches the next opcode double word and sign extends it to a quad word,
3152 * returns automatically on failure.
3153 *
3154 * @param a_pu64 Where to return the opcode quad word.
3155 * @remark Implicitly references pVCpu.
3156 */
3157#ifndef IEM_WITH_SETJMP
3158# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3159 do \
3160 { \
3161 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3162 if (rcStrict2 != VINF_SUCCESS) \
3163 return rcStrict2; \
3164 } while (0)
3165#else
3166# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3167#endif
3168
3169#ifndef IEM_WITH_SETJMP
3170
3171/**
3172 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3173 *
3174 * @returns Strict VBox status code.
3175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3176 * @param pu64 Where to return the opcode qword.
3177 */
3178DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3179{
3180 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3181 if (rcStrict == VINF_SUCCESS)
3182 {
3183 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3184# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3185 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3186# else
3187 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3188 pVCpu->iem.s.abOpcode[offOpcode + 1],
3189 pVCpu->iem.s.abOpcode[offOpcode + 2],
3190 pVCpu->iem.s.abOpcode[offOpcode + 3],
3191 pVCpu->iem.s.abOpcode[offOpcode + 4],
3192 pVCpu->iem.s.abOpcode[offOpcode + 5],
3193 pVCpu->iem.s.abOpcode[offOpcode + 6],
3194 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3195# endif
3196 pVCpu->iem.s.offOpcode = offOpcode + 8;
3197 }
3198 else
3199 *pu64 = 0;
3200 return rcStrict;
3201}
3202
3203
3204/**
3205 * Fetches the next opcode qword.
3206 *
3207 * @returns Strict VBox status code.
3208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3209 * @param pu64 Where to return the opcode qword.
3210 */
3211DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3212{
3213 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3214 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3215 {
3216# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3217 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3218# else
3219 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3220 pVCpu->iem.s.abOpcode[offOpcode + 1],
3221 pVCpu->iem.s.abOpcode[offOpcode + 2],
3222 pVCpu->iem.s.abOpcode[offOpcode + 3],
3223 pVCpu->iem.s.abOpcode[offOpcode + 4],
3224 pVCpu->iem.s.abOpcode[offOpcode + 5],
3225 pVCpu->iem.s.abOpcode[offOpcode + 6],
3226 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3227# endif
3228 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3229 return VINF_SUCCESS;
3230 }
3231 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3232}
3233
3234#else /* IEM_WITH_SETJMP */
3235
3236/**
3237 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3238 *
3239 * @returns The opcode qword.
3240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3241 */
3242DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3243{
3244# ifdef IEM_WITH_CODE_TLB
3245 uint64_t u64;
3246 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3247 return u64;
3248# else
3249 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3250 if (rcStrict == VINF_SUCCESS)
3251 {
3252 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3253 pVCpu->iem.s.offOpcode = offOpcode + 8;
3254# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3255 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3256# else
3257 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3258 pVCpu->iem.s.abOpcode[offOpcode + 1],
3259 pVCpu->iem.s.abOpcode[offOpcode + 2],
3260 pVCpu->iem.s.abOpcode[offOpcode + 3],
3261 pVCpu->iem.s.abOpcode[offOpcode + 4],
3262 pVCpu->iem.s.abOpcode[offOpcode + 5],
3263 pVCpu->iem.s.abOpcode[offOpcode + 6],
3264 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3265# endif
3266 }
3267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3268# endif
3269}
3270
3271
3272/**
3273 * Fetches the next opcode qword, longjmp on error.
3274 *
3275 * @returns The opcode qword.
3276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3277 */
3278DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3279{
3280# ifdef IEM_WITH_CODE_TLB
3281 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3282 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3283 if (RT_LIKELY( pbBuf != NULL
3284 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3285 {
3286 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3287# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3288 return *(uint64_t const *)&pbBuf[offBuf];
3289# else
3290 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3291 pbBuf[offBuf + 1],
3292 pbBuf[offBuf + 2],
3293 pbBuf[offBuf + 3],
3294 pbBuf[offBuf + 4],
3295 pbBuf[offBuf + 5],
3296 pbBuf[offBuf + 6],
3297 pbBuf[offBuf + 7]);
3298# endif
3299 }
3300# else
3301 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3302 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3303 {
3304 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3305# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3306 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3307# else
3308 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3309 pVCpu->iem.s.abOpcode[offOpcode + 1],
3310 pVCpu->iem.s.abOpcode[offOpcode + 2],
3311 pVCpu->iem.s.abOpcode[offOpcode + 3],
3312 pVCpu->iem.s.abOpcode[offOpcode + 4],
3313 pVCpu->iem.s.abOpcode[offOpcode + 5],
3314 pVCpu->iem.s.abOpcode[offOpcode + 6],
3315 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3316# endif
3317 }
3318# endif
3319 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3320}
3321
3322#endif /* IEM_WITH_SETJMP */
3323
3324/**
3325 * Fetches the next opcode quad word, returns automatically on failure.
3326 *
3327 * @param a_pu64 Where to return the opcode quad word.
3328 * @remark Implicitly references pVCpu.
3329 */
3330#ifndef IEM_WITH_SETJMP
3331# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3332 do \
3333 { \
3334 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3335 if (rcStrict2 != VINF_SUCCESS) \
3336 return rcStrict2; \
3337 } while (0)
3338#else
3339# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3340#endif
3341
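/*
 * Editor's sketch (not part of the original source): how the
 * IEM_OPCODE_GET_NEXT_xxx macros above are typically consumed from an
 * instruction implementation.  The function and the immediates it fetches are
 * hypothetical; only the macros, the implicit pVCpu reference and the
 * VBOXSTRICTRC / VINF_SUCCESS conventions are taken from this file.
 */
#if 0 /* illustration only */
IEM_STATIC VBOXSTRICTRC iemOpExample_FetchImmediates(PVMCPU pVCpu)
{
    uint32_t u32Imm;
    IEM_OPCODE_GET_NEXT_U32(&u32Imm);   /* returns the strict status code on a fetch failure
                                           (or longjmps when IEM_WITH_SETJMP is defined)    */
    uint64_t u64Imm;
    IEM_OPCODE_GET_NEXT_U64(&u64Imm);
    /* ... act on the fetched immediates here ... */
    return VINF_SUCCESS;
}
#endif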
3342
3343/** @name Misc Worker Functions.
3344 * @{
3345 */
3346
3347/**
3348 * Gets the exception class for the specified exception vector.
3349 *
3350 * @returns The class of the specified exception.
3351 * @param uVector The exception vector.
3352 */
3353IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3354{
3355 Assert(uVector <= X86_XCPT_LAST);
3356 switch (uVector)
3357 {
3358 case X86_XCPT_DE:
3359 case X86_XCPT_TS:
3360 case X86_XCPT_NP:
3361 case X86_XCPT_SS:
3362 case X86_XCPT_GP:
3363 case X86_XCPT_SX: /* AMD only */
3364 return IEMXCPTCLASS_CONTRIBUTORY;
3365
3366 case X86_XCPT_PF:
3367 case X86_XCPT_VE: /* Intel only */
3368 return IEMXCPTCLASS_PAGE_FAULT;
3369
3370 case X86_XCPT_DF:
3371 return IEMXCPTCLASS_DOUBLE_FAULT;
3372 }
3373 return IEMXCPTCLASS_BENIGN;
3374}
3375
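/*
 * Editor's note (not part of the original source): the class returned above
 * drives the double/triple fault matrix in IEMEvaluateRecursiveXcpt below.
 * Two quick examples: a #GP (contributory) raised while delivering a #TS
 * (contributory) escalates to #DF, whereas a #PF raised while delivering a
 * #GP does not - it is simply delivered as the current exception.  Only the
 * reverse order (#PF first, then #PF or a contributory exception) escalates,
 * and a contributory exception or #PF during #DF delivery means triple fault.
 */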
3376
3377/**
3378 * Evaluates how to handle an exception caused during delivery of another event
3379 * (exception / interrupt).
3380 *
3381 * @returns How to handle the recursive exception.
3382 * @param pVCpu The cross context virtual CPU structure of the
3383 * calling thread.
3384 * @param fPrevFlags The flags of the previous event.
3385 * @param uPrevVector The vector of the previous event.
3386 * @param fCurFlags The flags of the current exception.
3387 * @param uCurVector The vector of the current exception.
3388 * @param pfXcptRaiseInfo Where to store additional information about the
3389 * exception condition. Optional.
3390 */
3391VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3392 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3393{
3394 /*
3395     * Only CPU exceptions can be raised while delivering other events; software interrupt
3396 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3397 */
3398 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3399 Assert(pVCpu); RT_NOREF(pVCpu);
3400 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3401
3402 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3403 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3404 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3405 {
3406 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3407 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3408 {
3409 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3410 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3411 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3412 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3413 {
3414 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3415 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3416 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3417 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3418 uCurVector, pVCpu->cpum.GstCtx.cr2));
3419 }
3420 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3421 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3422 {
3423 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3424 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3425 }
3426 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3427 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3428 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3429 {
3430 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3431 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3432 }
3433 }
3434 else
3435 {
3436 if (uPrevVector == X86_XCPT_NMI)
3437 {
3438 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3439 if (uCurVector == X86_XCPT_PF)
3440 {
3441 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3442 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3443 }
3444 }
3445 else if ( uPrevVector == X86_XCPT_AC
3446 && uCurVector == X86_XCPT_AC)
3447 {
3448 enmRaise = IEMXCPTRAISE_CPU_HANG;
3449 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3450 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3451 }
3452 }
3453 }
3454 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3455 {
3456 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3457 if (uCurVector == X86_XCPT_PF)
3458 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3459 }
3460 else
3461 {
3462 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3463 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3464 }
3465
3466 if (pfXcptRaiseInfo)
3467 *pfXcptRaiseInfo = fRaiseInfo;
3468 return enmRaise;
3469}
3470
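/*
 * Editor's sketch (not part of the original source): a hypothetical caller
 * acting on the verdict returned above.  fPrevFlags, uPrevVector and
 * uCurVector are assumed to describe the two colliding events; the actual
 * raising/VM-exit plumbing is elided and only hinted at in the comments.
 */
#if 0 /* illustration only */
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector, &fRaiseInfo);
    switch (enmRaise)
    {
        case IEMXCPTRAISE_CURRENT_XCPT: /* deliver the new (current) exception as-is */           break;
        case IEMXCPTRAISE_DOUBLE_FAULT: /* raise #DF instead of the current exception */          break;
        case IEMXCPTRAISE_TRIPLE_FAULT: /* shut down, see iemInitiateCpuShutdown() below */       break;
        case IEMXCPTRAISE_CPU_HANG:     /* recursive #AC - hang the virtual CPU */                break;
        default:                        /* other/invalid verdicts, handled by the real callers */ break;
    }
#endif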
3471
3472/**
3473 * Enters the CPU shutdown state initiated by a triple fault or other
3474 * unrecoverable conditions.
3475 *
3476 * @returns Strict VBox status code.
3477 * @param pVCpu The cross context virtual CPU structure of the
3478 * calling thread.
3479 */
3480IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3481{
3482 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3483 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3484
3485 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3486 {
3487 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3488 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3489 }
3490
3491 RT_NOREF(pVCpu);
3492 return VINF_EM_TRIPLE_FAULT;
3493}
3494
3495
3496/**
3497 * Validates a new SS segment.
3498 *
3499 * @returns VBox strict status code.
3500 * @param pVCpu The cross context virtual CPU structure of the
3501 * calling thread.
3502 * @param   NewSS       The new SS selector.
3503 * @param uCpl The CPL to load the stack for.
3504 * @param pDesc Where to return the descriptor.
3505 */
3506IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3507{
3508 /* Null selectors are not allowed (we're not called for dispatching
3509 interrupts with SS=0 in long mode). */
3510 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3511 {
3512 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3513 return iemRaiseTaskSwitchFault0(pVCpu);
3514 }
3515
3516 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3517 if ((NewSS & X86_SEL_RPL) != uCpl)
3518 {
3519 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3520 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3521 }
3522
3523 /*
3524 * Read the descriptor.
3525 */
3526 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3527 if (rcStrict != VINF_SUCCESS)
3528 return rcStrict;
3529
3530 /*
3531 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3532 */
3533 if (!pDesc->Legacy.Gen.u1DescType)
3534 {
3535 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3536 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3537 }
3538
3539 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3540 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3541 {
3542 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3543 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3544 }
3545 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3546 {
3547 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3548 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3549 }
3550
3551 /* Is it there? */
3552 /** @todo testcase: Is this checked before the canonical / limit check below? */
3553 if (!pDesc->Legacy.Gen.u1Present)
3554 {
3555 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3556 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3557 }
3558
3559 return VINF_SUCCESS;
3560}
3561
3562
3563/**
3564 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3565 * not.
3566 *
3567 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3568 */
3569#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3570# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3571#else
3572# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3573#endif
3574
3575/**
3576 * Updates the EFLAGS in the correct manner wrt. PATM.
3577 *
3578 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3579 * @param a_fEfl The new EFLAGS.
3580 */
3581#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3582# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3583#else
3584# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3585#endif
3586
3587
3588/** @} */
3589
3590/** @name Raising Exceptions.
3591 *
3592 * @{
3593 */
3594
3595
3596/**
3597 * Loads the specified stack far pointer from the TSS.
3598 *
3599 * @returns VBox strict status code.
3600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3601 * @param uCpl The CPL to load the stack for.
3602 * @param pSelSS Where to return the new stack segment.
3603 * @param puEsp Where to return the new stack pointer.
3604 */
3605IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3606{
3607 VBOXSTRICTRC rcStrict;
3608 Assert(uCpl < 4);
3609
3610 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3611 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3612 {
3613 /*
3614 * 16-bit TSS (X86TSS16).
3615 */
3616 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3617 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3618 {
3619 uint32_t off = uCpl * 4 + 2;
3620 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3621 {
3622 /** @todo check actual access pattern here. */
3623 uint32_t u32Tmp = 0; /* gcc maybe... */
3624 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3625 if (rcStrict == VINF_SUCCESS)
3626 {
3627 *puEsp = RT_LOWORD(u32Tmp);
3628 *pSelSS = RT_HIWORD(u32Tmp);
3629 return VINF_SUCCESS;
3630 }
3631 }
3632 else
3633 {
3634 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3635 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3636 }
3637 break;
3638 }
3639
3640 /*
3641 * 32-bit TSS (X86TSS32).
3642 */
3643 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3644 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3645 {
3646 uint32_t off = uCpl * 8 + 4;
3647 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3648 {
3649/** @todo check actual access pattern here. */
3650 uint64_t u64Tmp;
3651 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3652 if (rcStrict == VINF_SUCCESS)
3653 {
3654 *puEsp = u64Tmp & UINT32_MAX;
3655 *pSelSS = (RTSEL)(u64Tmp >> 32);
3656 return VINF_SUCCESS;
3657 }
3658 }
3659 else
3660 {
3661            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3662 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3663 }
3664 break;
3665 }
3666
3667 default:
3668 AssertFailed();
3669 rcStrict = VERR_IEM_IPE_4;
3670 break;
3671 }
3672
3673 *puEsp = 0; /* make gcc happy */
3674 *pSelSS = 0; /* make gcc happy */
3675 return rcStrict;
3676}
3677
3678
3679/**
3680 * Loads the specified stack pointer from the 64-bit TSS.
3681 *
3682 * @returns VBox strict status code.
3683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3684 * @param uCpl The CPL to load the stack for.
3685 * @param   uIst        The interrupt stack table index; 0 means use the stack for uCpl (rsp0..rsp2).
3686 * @param puRsp Where to return the new stack pointer.
3687 */
3688IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3689{
3690 Assert(uCpl < 4);
3691 Assert(uIst < 8);
3692 *puRsp = 0; /* make gcc happy */
3693
3694 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3695 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3696
3697 uint32_t off;
3698 if (uIst)
3699 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3700 else
3701 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3702 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3703 {
3704 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3705 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3706 }
3707
3708 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3709}
3710
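/*
 * Editor's note (not part of the original source): a worked example of the
 * offset arithmetic above, assuming the canonical 64-bit TSS layout with rsp0
 * at offset 0x04 and ist1 at offset 0x24.  For uIst=0 and uCpl=2 the code
 * reads rsp2 at offset 2*8 + 0x04 = 0x14; for uIst=3 it reads ist3 at offset
 * (3-1)*8 + 0x24 = 0x34, regardless of uCpl.
 */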
3711
3712/**
3713 * Adjust the CPU state according to the exception being raised.
3714 *
3715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3716 * @param u8Vector The exception that has been raised.
3717 */
3718DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3719{
3720 switch (u8Vector)
3721 {
3722 case X86_XCPT_DB:
3723 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3724 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3725 break;
3726 /** @todo Read the AMD and Intel exception reference... */
3727 }
3728}
3729
3730
3731/**
3732 * Implements exceptions and interrupts for real mode.
3733 *
3734 * @returns VBox strict status code.
3735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3736 * @param cbInstr The number of bytes to offset rIP by in the return
3737 * address.
3738 * @param u8Vector The interrupt / exception vector number.
3739 * @param fFlags The flags.
3740 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3741 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3742 */
3743IEM_STATIC VBOXSTRICTRC
3744iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3745 uint8_t cbInstr,
3746 uint8_t u8Vector,
3747 uint32_t fFlags,
3748 uint16_t uErr,
3749 uint64_t uCr2)
3750{
3751 NOREF(uErr); NOREF(uCr2);
3752 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3753
3754 /*
3755 * Read the IDT entry.
3756 */
3757 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3758 {
3759 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3760 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3761 }
3762 RTFAR16 Idte;
3763 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3764 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3765 {
3766 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3767 return rcStrict;
3768 }
3769
3770 /*
3771 * Push the stack frame.
3772 */
3773 uint16_t *pu16Frame;
3774 uint64_t uNewRsp;
3775 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3776 if (rcStrict != VINF_SUCCESS)
3777 return rcStrict;
3778
3779 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3780#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3781 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
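    /* Editor's note (added): the 8086/88, V20/V30 and 80186/188 report FLAGS
       bits 12-15 as set when FLAGS is stored, which is what the fix-up below
       reproduces for those target CPUs. */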
3782 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3783 fEfl |= UINT16_C(0xf000);
3784#endif
3785 pu16Frame[2] = (uint16_t)fEfl;
3786 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3787 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3788 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3789 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3790 return rcStrict;
3791
3792 /*
3793 * Load the vector address into cs:ip and make exception specific state
3794 * adjustments.
3795 */
3796 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3797 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3798 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3799 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3800 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3801 pVCpu->cpum.GstCtx.rip = Idte.off;
3802 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3803 IEMMISC_SET_EFL(pVCpu, fEfl);
3804
3805 /** @todo do we actually do this in real mode? */
3806 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3807 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3808
3809 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3810}
3811
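/*
 * Editor's note (not part of the original source): the layout assumed by the
 * code above, for reference.  A real-mode IVT entry is 4 bytes at vector*4,
 * offset in the low word and segment in the high word (hence the RTFAR16
 * fetch), and the pushed interrupt frame is 6 bytes:
 *
 *      SS:SP+4   FLAGS   (pu16Frame[2])
 *      SS:SP+2   CS      (pu16Frame[1])
 *      SS:SP+0   IP      (pu16Frame[0]; +cbInstr for software interrupts)
 */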
3812
3813/**
3814 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3815 *
3816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3817 * @param pSReg Pointer to the segment register.
3818 */
3819IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3820{
3821 pSReg->Sel = 0;
3822 pSReg->ValidSel = 0;
3823 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3824 {
3825        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
3826 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3827 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3828 }
3829 else
3830 {
3831 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3832 /** @todo check this on AMD-V */
3833 pSReg->u64Base = 0;
3834 pSReg->u32Limit = 0;
3835 }
3836}
3837
3838
3839/**
3840 * Loads a segment selector during a task switch in V8086 mode.
3841 *
3842 * @param pSReg Pointer to the segment register.
3843 * @param uSel The selector value to load.
3844 */
3845IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3846{
3847 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3848 pSReg->Sel = uSel;
3849 pSReg->ValidSel = uSel;
3850 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3851 pSReg->u64Base = uSel << 4;
3852 pSReg->u32Limit = 0xffff;
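    /* Editor's note (added): 0xf3 = present, DPL=3, S=1 (code/data), type 3
       (read/write data, accessed) - the fixed attribute value V86 segments use. */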
3853 pSReg->Attr.u = 0xf3;
3854}
3855
3856
3857/**
3858 * Loads a NULL data selector into a selector register, both the hidden and
3859 * visible parts, in protected mode.
3860 *
3861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3862 * @param pSReg Pointer to the segment register.
3863 * @param uRpl The RPL.
3864 */
3865IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3866{
3867    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3868 * data selector in protected mode. */
3869 pSReg->Sel = uRpl;
3870 pSReg->ValidSel = uRpl;
3871 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3872 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3873 {
3874        /* VT-x (Intel 3960x) has been observed doing something like this. */
3875 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3876 pSReg->u32Limit = UINT32_MAX;
3877 pSReg->u64Base = 0;
3878 }
3879 else
3880 {
3881 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3882 pSReg->u32Limit = 0;
3883 pSReg->u64Base = 0;
3884 }
3885}
3886
3887
3888/**
3889 * Loads a segment selector during a task switch in protected mode.
3890 *
3891 * In this task switch scenario, we would throw \#TS exceptions rather than
3892 * \#GPs.
3893 *
3894 * @returns VBox strict status code.
3895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3896 * @param pSReg Pointer to the segment register.
3897 * @param uSel The new selector value.
3898 *
3899 * @remarks This does _not_ handle CS or SS.
3900 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3901 */
3902IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3903{
3904 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3905
3906 /* Null data selector. */
3907 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3908 {
3909 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3910 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3911 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3912 return VINF_SUCCESS;
3913 }
3914
3915 /* Fetch the descriptor. */
3916 IEMSELDESC Desc;
3917 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3918 if (rcStrict != VINF_SUCCESS)
3919 {
3920 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3921 VBOXSTRICTRC_VAL(rcStrict)));
3922 return rcStrict;
3923 }
3924
3925 /* Must be a data segment or readable code segment. */
3926 if ( !Desc.Legacy.Gen.u1DescType
3927 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3928 {
3929 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3930 Desc.Legacy.Gen.u4Type));
3931 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3932 }
3933
3934 /* Check privileges for data segments and non-conforming code segments. */
3935 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3936 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3937 {
3938 /* The RPL and the new CPL must be less than or equal to the DPL. */
3939 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3940 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3941 {
3942 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3943 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3944 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3945 }
3946 }
3947
3948 /* Is it there? */
3949 if (!Desc.Legacy.Gen.u1Present)
3950 {
3951 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3952 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3953 }
3954
3955 /* The base and limit. */
3956 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3957 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3958
3959 /*
3960 * Ok, everything checked out fine. Now set the accessed bit before
3961 * committing the result into the registers.
3962 */
3963 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3964 {
3965 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3966 if (rcStrict != VINF_SUCCESS)
3967 return rcStrict;
3968 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3969 }
3970
3971 /* Commit */
3972 pSReg->Sel = uSel;
3973 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3974 pSReg->u32Limit = cbLimit;
3975 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3976 pSReg->ValidSel = uSel;
3977 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3978 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3979 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3980
3981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3982 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3983 return VINF_SUCCESS;
3984}
3985
3986
3987/**
3988 * Performs a task switch.
3989 *
3990 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3991 * caller is responsible for performing the necessary checks (like DPL, TSS
3992 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3993 * reference for JMP, CALL, IRET.
3994 *
3995 * If the task switch is due to a software interrupt or hardware exception,
3996 * the caller is responsible for validating the TSS selector and descriptor. See
3997 * Intel Instruction reference for INT n.
3998 *
3999 * @returns VBox strict status code.
4000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4001 * @param enmTaskSwitch The cause of the task switch.
4002 * @param uNextEip The EIP effective after the task switch.
4003 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4004 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4005 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4006 * @param SelTSS The TSS selector of the new task.
4007 * @param pNewDescTSS Pointer to the new TSS descriptor.
4008 */
4009IEM_STATIC VBOXSTRICTRC
4010iemTaskSwitch(PVMCPU pVCpu,
4011 IEMTASKSWITCH enmTaskSwitch,
4012 uint32_t uNextEip,
4013 uint32_t fFlags,
4014 uint16_t uErr,
4015 uint64_t uCr2,
4016 RTSEL SelTSS,
4017 PIEMSELDESC pNewDescTSS)
4018{
4019 Assert(!IEM_IS_REAL_MODE(pVCpu));
4020 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4021 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4022
4023 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4024 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4025 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4026 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4027 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4028
4029 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4030 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4031
4032 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4033 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4034
4035 /* Update CR2 in case it's a page-fault. */
4036 /** @todo This should probably be done much earlier in IEM/PGM. See
4037 * @bugref{5653#c49}. */
4038 if (fFlags & IEM_XCPT_FLAGS_CR2)
4039 pVCpu->cpum.GstCtx.cr2 = uCr2;
4040
4041 /*
4042 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4043 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4044 */
4045 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4046 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4047 if (uNewTSSLimit < uNewTSSLimitMin)
4048 {
4049 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4050 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4051 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4052 }
4053
4054 /*
4055     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4056 * The new TSS must have been read and validated (DPL, limits etc.) before a
4057 * task-switch VM-exit commences.
4058 *
4059     * See Intel spec. 25.4.2 "Treatment of Task Switches"
4060 */
4061 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4062 {
4063 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4064 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4065 }
4066
4067 /*
4068 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4069 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4070 */
4071 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4072 {
4073 uint32_t const uExitInfo1 = SelTSS;
4074 uint32_t uExitInfo2 = uErr;
4075 switch (enmTaskSwitch)
4076 {
4077 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4078 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4079 default: break;
4080 }
4081 if (fFlags & IEM_XCPT_FLAGS_ERR)
4082 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4083 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4084 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4085
4086 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4087 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4088 RT_NOREF2(uExitInfo1, uExitInfo2);
4089 }
4090
4091 /*
4092 * Check the current TSS limit. The last written byte to the current TSS during the
4093 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4094 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4095 *
4096     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4097 * end up with smaller than "legal" TSS limits.
4098 */
4099 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4100 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4101 if (uCurTSSLimit < uCurTSSLimitMin)
4102 {
4103 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4104 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4105 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4106 }
4107
4108 /*
4109 * Verify that the new TSS can be accessed and map it. Map only the required contents
4110 * and not the entire TSS.
4111 */
4112 void *pvNewTSS;
4113 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4114 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4115 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4116    /** @todo Handle if the TSS crosses a page boundary. Intel specifies that the CPU may
4117 * not perform correct translation if this happens. See Intel spec. 7.2.1
4118 * "Task-State Segment" */
4119 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4120 if (rcStrict != VINF_SUCCESS)
4121 {
4122 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4123 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4124 return rcStrict;
4125 }
4126
4127 /*
4128 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4129 */
4130 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4131 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4132 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4133 {
4134 PX86DESC pDescCurTSS;
4135 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4136 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4137 if (rcStrict != VINF_SUCCESS)
4138 {
4139            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4140 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4141 return rcStrict;
4142 }
4143
4144 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4145 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4149 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152
4153 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4154 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4155 {
4156 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4157 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4158 u32EFlags &= ~X86_EFL_NT;
4159 }
4160 }
4161
4162 /*
4163 * Save the CPU state into the current TSS.
4164 */
4165 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4166 if (GCPtrNewTSS == GCPtrCurTSS)
4167 {
4168 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4169 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4170 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4171 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4172 pVCpu->cpum.GstCtx.ldtr.Sel));
4173 }
4174 if (fIsNewTSS386)
4175 {
4176 /*
4177 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4178 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4179 */
4180 void *pvCurTSS32;
4181 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4182 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4183 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4184 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4185 if (rcStrict != VINF_SUCCESS)
4186 {
4187 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4188 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4189 return rcStrict;
4190 }
4191
4192        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4193 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4194 pCurTSS32->eip = uNextEip;
4195 pCurTSS32->eflags = u32EFlags;
4196 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4197 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4198 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4199 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4200 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4201 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4202 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4203 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4204 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4205 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4206 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4207 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4208 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4209 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4210
4211 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4212 if (rcStrict != VINF_SUCCESS)
4213 {
4214 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4215 VBOXSTRICTRC_VAL(rcStrict)));
4216 return rcStrict;
4217 }
4218 }
4219 else
4220 {
4221 /*
4222 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4223 */
4224 void *pvCurTSS16;
4225 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4226 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4227 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4228 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4229 if (rcStrict != VINF_SUCCESS)
4230 {
4231 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4232 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4233 return rcStrict;
4234 }
4235
4236        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4237 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4238 pCurTSS16->ip = uNextEip;
4239 pCurTSS16->flags = u32EFlags;
4240 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4241 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4242 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4243 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4244 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4245 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4246 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4247 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4248 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4249 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4250 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4251 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4252
4253 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4254 if (rcStrict != VINF_SUCCESS)
4255 {
4256 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4257 VBOXSTRICTRC_VAL(rcStrict)));
4258 return rcStrict;
4259 }
4260 }
4261
4262 /*
4263 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4264 */
4265 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4266 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4267 {
4268        /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4269 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4270 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4271 }
4272
4273 /*
4274 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4275 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4276 */
4277 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4278 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4279 bool fNewDebugTrap;
4280 if (fIsNewTSS386)
4281 {
4282 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4283 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4284 uNewEip = pNewTSS32->eip;
4285 uNewEflags = pNewTSS32->eflags;
4286 uNewEax = pNewTSS32->eax;
4287 uNewEcx = pNewTSS32->ecx;
4288 uNewEdx = pNewTSS32->edx;
4289 uNewEbx = pNewTSS32->ebx;
4290 uNewEsp = pNewTSS32->esp;
4291 uNewEbp = pNewTSS32->ebp;
4292 uNewEsi = pNewTSS32->esi;
4293 uNewEdi = pNewTSS32->edi;
4294 uNewES = pNewTSS32->es;
4295 uNewCS = pNewTSS32->cs;
4296 uNewSS = pNewTSS32->ss;
4297 uNewDS = pNewTSS32->ds;
4298 uNewFS = pNewTSS32->fs;
4299 uNewGS = pNewTSS32->gs;
4300 uNewLdt = pNewTSS32->selLdt;
4301 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4302 }
4303 else
4304 {
4305 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4306 uNewCr3 = 0;
4307 uNewEip = pNewTSS16->ip;
4308 uNewEflags = pNewTSS16->flags;
4309 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4310 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4311 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4312 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4313 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4314 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4315 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4316 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4317 uNewES = pNewTSS16->es;
4318 uNewCS = pNewTSS16->cs;
4319 uNewSS = pNewTSS16->ss;
4320 uNewDS = pNewTSS16->ds;
4321 uNewFS = 0;
4322 uNewGS = 0;
4323 uNewLdt = pNewTSS16->selLdt;
4324 fNewDebugTrap = false;
4325 }
4326
4327 if (GCPtrNewTSS == GCPtrCurTSS)
4328 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4329 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4330
4331 /*
4332 * We're done accessing the new TSS.
4333 */
4334 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4335 if (rcStrict != VINF_SUCCESS)
4336 {
4337 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4338 return rcStrict;
4339 }
4340
4341 /*
4342 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4343 */
4344 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4345 {
4346 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4347 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4348 if (rcStrict != VINF_SUCCESS)
4349 {
4350 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4351 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4352 return rcStrict;
4353 }
4354
4355 /* Check that the descriptor indicates the new TSS is available (not busy). */
4356 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4357 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4358 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4359
4360 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4361 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4362 if (rcStrict != VINF_SUCCESS)
4363 {
4364 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4365 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4366 return rcStrict;
4367 }
4368 }
4369
4370 /*
4371 * From this point on, we're technically in the new task. We will defer exceptions
4372 * until the completion of the task switch but before executing any instructions in the new task.
4373 */
4374 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4375 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4376 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4377 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4378 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4379 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4380 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4381
4382 /* Set the busy bit in TR. */
4383 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4384 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4385 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4386 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4387 {
4388 uNewEflags |= X86_EFL_NT;
4389 }
4390
4391 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4392 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4393 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4394
4395 pVCpu->cpum.GstCtx.eip = uNewEip;
4396 pVCpu->cpum.GstCtx.eax = uNewEax;
4397 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4398 pVCpu->cpum.GstCtx.edx = uNewEdx;
4399 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4400 pVCpu->cpum.GstCtx.esp = uNewEsp;
4401 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4402 pVCpu->cpum.GstCtx.esi = uNewEsi;
4403 pVCpu->cpum.GstCtx.edi = uNewEdi;
4404
4405 uNewEflags &= X86_EFL_LIVE_MASK;
4406 uNewEflags |= X86_EFL_RA1_MASK;
4407 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4408
4409 /*
4410 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4411 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4412 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4413 */
4414 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4415 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4416
4417 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4418 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4419
4420 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4421 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4424 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4427 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4430 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4431 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4432
4433 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4434 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4435 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4436 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4437
4438 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4439 {
4440 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4441 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4442 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4443 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4444 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4445 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4446 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4447 }
4448
4449 /*
4450 * Switch CR3 for the new task.
4451 */
4452 if ( fIsNewTSS386
4453 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4454 {
4455 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4456 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4457 AssertRCSuccessReturn(rc, rc);
4458
4459 /* Inform PGM. */
4460 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4461 AssertRCReturn(rc, rc);
4462 /* ignore informational status codes */
4463
4464 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4465 }
4466
4467 /*
4468 * Switch LDTR for the new task.
4469 */
4470 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4471 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4472 else
4473 {
4474 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4475
4476 IEMSELDESC DescNewLdt;
4477 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4478 if (rcStrict != VINF_SUCCESS)
4479 {
4480 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4481 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4482 return rcStrict;
4483 }
4484 if ( !DescNewLdt.Legacy.Gen.u1Present
4485 || DescNewLdt.Legacy.Gen.u1DescType
4486 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4487 {
4488 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4489 uNewLdt, DescNewLdt.Legacy.u));
4490 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4491 }
4492
4493 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4494 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4495 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4496 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4497 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4498 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4499 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4500 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4501 }
4502
4503 IEMSELDESC DescSS;
4504 if (IEM_IS_V86_MODE(pVCpu))
4505 {
4506 pVCpu->iem.s.uCpl = 3;
4507 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4508 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4509 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4510 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4511 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4512 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4513
4514 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4515 DescSS.Legacy.u = 0;
4516 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4517 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4518 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4519 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4520 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4521 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4522 DescSS.Legacy.Gen.u2Dpl = 3;
4523 }
4524 else
4525 {
4526 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4527
4528 /*
4529 * Load the stack segment for the new task.
4530 */
4531 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4532 {
4533 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* Fetch the descriptor. */
4538 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4539 if (rcStrict != VINF_SUCCESS)
4540 {
4541 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4542 VBOXSTRICTRC_VAL(rcStrict)));
4543 return rcStrict;
4544 }
4545
4546 /* SS must be a data segment and writable. */
4547 if ( !DescSS.Legacy.Gen.u1DescType
4548 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4549 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4550 {
4551 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4552 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4553 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4554 }
4555
4556 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4557 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4558 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4559 {
4560 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4561 uNewCpl));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565 /* Is it there? */
4566 if (!DescSS.Legacy.Gen.u1Present)
4567 {
4568 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4569 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4570 }
4571
4572 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4573 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4574
4575 /* Set the accessed bit before committing the result into SS. */
4576 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4577 {
4578 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4579 if (rcStrict != VINF_SUCCESS)
4580 return rcStrict;
4581 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4582 }
4583
4584 /* Commit SS. */
4585 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4586 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4587 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4588 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4589 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4590 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4591 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4592
4593 /* CPL has changed, update IEM before loading rest of segments. */
4594 pVCpu->iem.s.uCpl = uNewCpl;
4595
4596 /*
4597 * Load the data segments for the new task.
4598 */
4599 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4600 if (rcStrict != VINF_SUCCESS)
4601 return rcStrict;
4602 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4603 if (rcStrict != VINF_SUCCESS)
4604 return rcStrict;
4605 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611
4612 /*
4613 * Load the code segment for the new task.
4614 */
4615 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4616 {
4617 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4618 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4619 }
4620
4621 /* Fetch the descriptor. */
4622 IEMSELDESC DescCS;
4623 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4624 if (rcStrict != VINF_SUCCESS)
4625 {
4626 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4627 return rcStrict;
4628 }
4629
4630 /* CS must be a code segment. */
4631 if ( !DescCS.Legacy.Gen.u1DescType
4632 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4633 {
4634 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4635 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4636 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4637 }
4638
4639 /* For conforming CS, DPL must be less than or equal to the RPL. */
4640 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4641 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4642 {
4643                 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4644 DescCS.Legacy.Gen.u2Dpl));
4645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4646 }
4647
4648 /* For non-conforming CS, DPL must match RPL. */
4649 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4650 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4651 {
4652                 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4653 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4655 }
4656
4657 /* Is it there? */
4658 if (!DescCS.Legacy.Gen.u1Present)
4659 {
4660 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4661 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4662 }
4663
4664 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4665 u64Base = X86DESC_BASE(&DescCS.Legacy);
4666
4667 /* Set the accessed bit before committing the result into CS. */
4668 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4669 {
4670 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4671 if (rcStrict != VINF_SUCCESS)
4672 return rcStrict;
4673 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4674 }
4675
4676 /* Commit CS. */
4677 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4678 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4679 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4680 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4681 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4682 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4683 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4684 }
4685
4686 /** @todo Debug trap. */
4687 if (fIsNewTSS386 && fNewDebugTrap)
4688 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4689
4690 /*
4691 * Construct the error code masks based on what caused this task switch.
4692 * See Intel Instruction reference for INT.
4693 */
4694 uint16_t uExt;
4695 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4696 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4697 {
4698 uExt = 1;
4699 }
4700 else
4701 uExt = 0;
4702
4703 /*
4704 * Push any error code on to the new stack.
4705 */
4706 if (fFlags & IEM_XCPT_FLAGS_ERR)
4707 {
4708 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4709 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
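             /* The error code is pushed as a dword on a 32-bit TSS stack and as a word on a 16-bit one. */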
4710 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4711
4712 /* Check that there is sufficient space on the stack. */
4713 /** @todo Factor out segment limit checking for normal/expand down segments
4714 * into a separate function. */
4715 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4716 {
4717 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4718 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4719 {
4720 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4721 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4722 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4723 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4724 }
4725 }
4726 else
4727 {
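                     /* Expand-down segment: valid offsets lie above the limit, up to 64KB or 4GB depending on the D/B bit. */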
4728 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4729 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4730 {
4731 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4732 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4733 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4734 }
4735 }
4736
4737
4738 if (fIsNewTSS386)
4739 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4740 else
4741 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4742 if (rcStrict != VINF_SUCCESS)
4743 {
4744 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4745 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4746 return rcStrict;
4747 }
4748 }
4749
4750 /* Check the new EIP against the new CS limit. */
4751 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4752 {
4753         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4754 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4755 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4756 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4757 }
4758
4759 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4760 pVCpu->cpum.GstCtx.ss.Sel));
4761 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4762}
4763
4764
4765/**
4766 * Implements exceptions and interrupts for protected mode.
4767 *
4768 * @returns VBox strict status code.
4769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4770 * @param cbInstr The number of bytes to offset rIP by in the return
4771 * address.
4772 * @param u8Vector The interrupt / exception vector number.
4773 * @param fFlags The flags.
4774 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4775 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4776 */
4777IEM_STATIC VBOXSTRICTRC
4778iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4779 uint8_t cbInstr,
4780 uint8_t u8Vector,
4781 uint32_t fFlags,
4782 uint16_t uErr,
4783 uint64_t uCr2)
4784{
4785 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4786
4787 /*
4788 * Read the IDT entry.
4789 */
4790 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4791 {
4792 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4793 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4794 }
4795 X86DESC Idte;
4796 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4797 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4798 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4799 {
4800 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4801 return rcStrict;
4802 }
4803 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4804 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4805 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4806
4807 /*
4808 * Check the descriptor type, DPL and such.
4809 * ASSUMES this is done in the same order as described for call-gate calls.
4810 */
4811 if (Idte.Gate.u1DescType)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4814 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4815 }
4816 bool fTaskGate = false;
4817 uint8_t f32BitGate = true;
4818 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4819 switch (Idte.Gate.u4Type)
4820 {
4821 case X86_SEL_TYPE_SYS_UNDEFINED:
4822 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4823 case X86_SEL_TYPE_SYS_LDT:
4824 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4825 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4826 case X86_SEL_TYPE_SYS_UNDEFINED2:
4827 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4828 case X86_SEL_TYPE_SYS_UNDEFINED3:
4829 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4830 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4831 case X86_SEL_TYPE_SYS_UNDEFINED4:
4832 {
4833 /** @todo check what actually happens when the type is wrong...
4834 * esp. call gates. */
4835 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4836 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4837 }
4838
4839 case X86_SEL_TYPE_SYS_286_INT_GATE:
4840 f32BitGate = false;
4841 RT_FALL_THRU();
4842 case X86_SEL_TYPE_SYS_386_INT_GATE:
4843 fEflToClear |= X86_EFL_IF;
4844 break;
4845
4846 case X86_SEL_TYPE_SYS_TASK_GATE:
4847 fTaskGate = true;
4848#ifndef IEM_IMPLEMENTS_TASKSWITCH
4849 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4850#endif
4851 break;
4852
4853 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4854             f32BitGate = false;
                 RT_FALL_THRU();
4855 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4856 break;
4857
4858 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4859 }
4860
4861 /* Check DPL against CPL if applicable. */
4862 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4863 {
4864 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4865 {
4866 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4867 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4868 }
4869 }
4870
4871 /* Is it there? */
4872 if (!Idte.Gate.u1Present)
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4875 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4876 }
4877
4878 /* Is it a task-gate? */
4879 if (fTaskGate)
4880 {
4881 /*
4882 * Construct the error code masks based on what caused this task switch.
4883 * See Intel Instruction reference for INT.
4884 */
4885 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4886 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4887 RTSEL SelTSS = Idte.Gate.u16Sel;
4888
4889 /*
4890 * Fetch the TSS descriptor in the GDT.
4891 */
4892 IEMSELDESC DescTSS;
4893 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4894 if (rcStrict != VINF_SUCCESS)
4895 {
4896 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4897 VBOXSTRICTRC_VAL(rcStrict)));
4898 return rcStrict;
4899 }
4900
4901 /* The TSS descriptor must be a system segment and be available (not busy). */
4902 if ( DescTSS.Legacy.Gen.u1DescType
4903 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4904 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4905 {
4906 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4907 u8Vector, SelTSS, DescTSS.Legacy.au64));
4908 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4909 }
4910
4911 /* The TSS must be present. */
4912 if (!DescTSS.Legacy.Gen.u1Present)
4913 {
4914 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4915 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4916 }
4917
4918 /* Do the actual task switch. */
4919 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4920 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4921 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4922 }
4923
4924 /* A null CS is bad. */
4925 RTSEL NewCS = Idte.Gate.u16Sel;
4926 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4927 {
4928 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4929 return iemRaiseGeneralProtectionFault0(pVCpu);
4930 }
4931
4932 /* Fetch the descriptor for the new CS. */
4933 IEMSELDESC DescCS;
4934 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4935 if (rcStrict != VINF_SUCCESS)
4936 {
4937 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4938 return rcStrict;
4939 }
4940
4941 /* Must be a code segment. */
4942 if (!DescCS.Legacy.Gen.u1DescType)
4943 {
4944 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4945 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4946 }
4947 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4948 {
4949 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4950 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4951 }
4952
4953 /* Don't allow lowering the privilege level. */
4954 /** @todo Does the lowering of privileges apply to software interrupts
4955 * only? This has bearings on the more-privileged or
4956 * same-privilege stack behavior further down. A testcase would
4957 * be nice. */
4958 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4959 {
4960 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4961 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4962 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4963 }
4964
4965 /* Make sure the selector is present. */
4966 if (!DescCS.Legacy.Gen.u1Present)
4967 {
4968 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4969 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4970 }
4971
4972 /* Check the new EIP against the new CS limit. */
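         /* 286 gates only carry a 16-bit offset; 386 gates supply a full 32-bit offset. */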
4973 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4974 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4975 ? Idte.Gate.u16OffsetLow
4976 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4977 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4978 if (uNewEip > cbLimitCS)
4979 {
4980 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4981 u8Vector, uNewEip, cbLimitCS, NewCS));
4982 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4983 }
4984 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4985
4986 /* Calc the flag image to push. */
4987 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4988 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4989 fEfl &= ~X86_EFL_RF;
4990 else
4991 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4992
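     /* A conforming CS keeps the current CPL; a non-conforming CS runs the handler at its own DPL. */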
4993 /* From V8086 mode only go to CPL 0. */
4994 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4995 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4996 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4997 {
4998 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4999 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5000 }
5001
5002 /*
5003 * If the privilege level changes, we need to get a new stack from the TSS.
5004 * This in turns means validating the new SS and ESP...
5005 */
5006 if (uNewCpl != pVCpu->iem.s.uCpl)
5007 {
5008 RTSEL NewSS;
5009 uint32_t uNewEsp;
5010 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5011 if (rcStrict != VINF_SUCCESS)
5012 return rcStrict;
5013
5014 IEMSELDESC DescSS;
5015 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5016 if (rcStrict != VINF_SUCCESS)
5017 return rcStrict;
5018 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5019 if (!DescSS.Legacy.Gen.u1DefBig)
5020 {
5021 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5022 uNewEsp = (uint16_t)uNewEsp;
5023 }
5024
5025 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5026
5027 /* Check that there is sufficient space for the stack frame. */
5028 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
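             /* Frame sizes before the 32-bit gate doubling: EIP, CS, EFLAGS, ESP and SS take 10 bytes (+2 for an error code);
                a V8086 frame also saves ES, DS, FS and GS for 18 bytes (+2). */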
5029 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5030 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5031 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
5032
5033 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5034 {
5035 if ( uNewEsp - 1 > cbLimitSS
5036 || uNewEsp < cbStackFrame)
5037 {
5038 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5039 u8Vector, NewSS, uNewEsp, cbStackFrame));
5040 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5041 }
5042 }
5043 else
5044 {
5045 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5046 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5047 {
5048 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5049 u8Vector, NewSS, uNewEsp, cbStackFrame));
5050 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5051 }
5052 }
5053
5054 /*
5055 * Start making changes.
5056 */
5057
5058 /* Set the new CPL so that stack accesses use it. */
5059 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5060 pVCpu->iem.s.uCpl = uNewCpl;
5061
5062 /* Create the stack frame. */
5063 RTPTRUNION uStackFrame;
5064 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5065 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5066 if (rcStrict != VINF_SUCCESS)
5067 return rcStrict;
5068 void * const pvStackFrame = uStackFrame.pv;
5069 if (f32BitGate)
5070 {
5071 if (fFlags & IEM_XCPT_FLAGS_ERR)
5072 *uStackFrame.pu32++ = uErr;
5073 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5074 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5075 uStackFrame.pu32[2] = fEfl;
5076 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5077 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5078 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5079 if (fEfl & X86_EFL_VM)
5080 {
5081 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5082 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5083 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5084 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5085 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5086 }
5087 }
5088 else
5089 {
5090 if (fFlags & IEM_XCPT_FLAGS_ERR)
5091 *uStackFrame.pu16++ = uErr;
5092 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5093 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5094 uStackFrame.pu16[2] = fEfl;
5095 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5096 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5097 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5098 if (fEfl & X86_EFL_VM)
5099 {
5100 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5101 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5102 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5103 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5104 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5105 }
5106 }
5107 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5108 if (rcStrict != VINF_SUCCESS)
5109 return rcStrict;
5110
5111 /* Mark the selectors 'accessed' (hope this is the correct time). */
5112     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5113 * after pushing the stack frame? (Write protect the gdt + stack to
5114 * find out.) */
5115 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5116 {
5117 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5118 if (rcStrict != VINF_SUCCESS)
5119 return rcStrict;
5120 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5121 }
5122
5123 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5124 {
5125 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5126 if (rcStrict != VINF_SUCCESS)
5127 return rcStrict;
5128 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5129 }
5130
5131 /*
5132          * Start committing the register changes (joins with the DPL=CPL branch).
5133 */
5134 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5135 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5136 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5137 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5138 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5139 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5140 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5141 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5142 * SP is loaded).
5143 * Need to check the other combinations too:
5144 * - 16-bit TSS, 32-bit handler
5145 * - 32-bit TSS, 16-bit handler */
5146 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5147 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5148 else
5149 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5150
5151 if (fEfl & X86_EFL_VM)
5152 {
5153 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5154 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5155 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5156 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5157 }
5158 }
5159 /*
5160 * Same privilege, no stack change and smaller stack frame.
5161 */
5162 else
5163 {
5164 uint64_t uNewRsp;
5165 RTPTRUNION uStackFrame;
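             /* Same-privilege frame: just EIP, CS and EFLAGS (6 bytes with a 16-bit gate, doubled for a 32-bit one), plus an optional error code. */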
5166 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5167 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5168 if (rcStrict != VINF_SUCCESS)
5169 return rcStrict;
5170 void * const pvStackFrame = uStackFrame.pv;
5171
5172 if (f32BitGate)
5173 {
5174 if (fFlags & IEM_XCPT_FLAGS_ERR)
5175 *uStackFrame.pu32++ = uErr;
5176 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5177 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5178 uStackFrame.pu32[2] = fEfl;
5179 }
5180 else
5181 {
5182 if (fFlags & IEM_XCPT_FLAGS_ERR)
5183 *uStackFrame.pu16++ = uErr;
5184 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5185 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5186 uStackFrame.pu16[2] = fEfl;
5187 }
5188 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5189 if (rcStrict != VINF_SUCCESS)
5190 return rcStrict;
5191
5192 /* Mark the CS selector as 'accessed'. */
5193 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5194 {
5195 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5196 if (rcStrict != VINF_SUCCESS)
5197 return rcStrict;
5198 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5199 }
5200
5201 /*
5202 * Start committing the register changes (joins with the other branch).
5203 */
5204 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5205 }
5206
5207 /* ... register committing continues. */
5208 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5209 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5210 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5211 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5212 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5213 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5214
5215 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5216 fEfl &= ~fEflToClear;
5217 IEMMISC_SET_EFL(pVCpu, fEfl);
5218
5219 if (fFlags & IEM_XCPT_FLAGS_CR2)
5220 pVCpu->cpum.GstCtx.cr2 = uCr2;
5221
5222 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5223 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5224
5225 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5226}
5227
5228
5229/**
5230 * Implements exceptions and interrupts for long mode.
5231 *
5232 * @returns VBox strict status code.
5233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5234 * @param cbInstr The number of bytes to offset rIP by in the return
5235 * address.
5236 * @param u8Vector The interrupt / exception vector number.
5237 * @param fFlags The flags.
5238 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5239 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5240 */
5241IEM_STATIC VBOXSTRICTRC
5242iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5243 uint8_t cbInstr,
5244 uint8_t u8Vector,
5245 uint32_t fFlags,
5246 uint16_t uErr,
5247 uint64_t uCr2)
5248{
5249 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5250
5251 /*
5252 * Read the IDT entry.
5253 */
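         /* Long mode IDT entries are 16 bytes each. */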
5254 uint16_t offIdt = (uint16_t)u8Vector << 4;
5255 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5258 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5259 }
5260 X86DESC64 Idte;
5261 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5262 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5263 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5264 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5267 return rcStrict;
5268 }
5269 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5270 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5271 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5272
5273 /*
5274 * Check the descriptor type, DPL and such.
5275 * ASSUMES this is done in the same order as described for call-gate calls.
5276 */
5277 if (Idte.Gate.u1DescType)
5278 {
5279 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5280 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5281 }
5282 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5283 switch (Idte.Gate.u4Type)
5284 {
5285 case AMD64_SEL_TYPE_SYS_INT_GATE:
5286 fEflToClear |= X86_EFL_IF;
5287 break;
5288 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5289 break;
5290
5291 default:
5292 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5293 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5294 }
5295
5296 /* Check DPL against CPL if applicable. */
5297 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5298 {
5299 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5300 {
5301 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5302 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5303 }
5304 }
5305
5306 /* Is it there? */
5307 if (!Idte.Gate.u1Present)
5308 {
5309 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5310 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5311 }
5312
5313 /* A null CS is bad. */
5314 RTSEL NewCS = Idte.Gate.u16Sel;
5315 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5316 {
5317 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5318 return iemRaiseGeneralProtectionFault0(pVCpu);
5319 }
5320
5321 /* Fetch the descriptor for the new CS. */
5322 IEMSELDESC DescCS;
5323 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5324 if (rcStrict != VINF_SUCCESS)
5325 {
5326 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5327 return rcStrict;
5328 }
5329
5330 /* Must be a 64-bit code segment. */
5331 if (!DescCS.Long.Gen.u1DescType)
5332 {
5333 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5334 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5335 }
5336 if ( !DescCS.Long.Gen.u1Long
5337 || DescCS.Long.Gen.u1DefBig
5338 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5339 {
5340 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5341 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5342 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5343 }
5344
5345 /* Don't allow lowering the privilege level. For non-conforming CS
5346 selectors, the CS.DPL sets the privilege level the trap/interrupt
5347 handler runs at. For conforming CS selectors, the CPL remains
5348 unchanged, but the CS.DPL must be <= CPL. */
5349 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5350 * when CPU in Ring-0. Result \#GP? */
5351 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5352 {
5353 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5354 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5355 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5356 }
5357
5358
5359 /* Make sure the selector is present. */
5360 if (!DescCS.Legacy.Gen.u1Present)
5361 {
5362 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5363 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5364 }
5365
5366 /* Check that the new RIP is canonical. */
5367 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5368 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5369 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5370 if (!IEM_IS_CANONICAL(uNewRip))
5371 {
5372 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5373 return iemRaiseGeneralProtectionFault0(pVCpu);
5374 }
5375
5376 /*
5377 * If the privilege level changes or if the IST isn't zero, we need to get
5378 * a new stack from the TSS.
5379 */
5380 uint64_t uNewRsp;
5381 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5382 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5383 if ( uNewCpl != pVCpu->iem.s.uCpl
5384 || Idte.Gate.u3IST != 0)
5385 {
5386 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5387 if (rcStrict != VINF_SUCCESS)
5388 return rcStrict;
5389 }
5390 else
5391 uNewRsp = pVCpu->cpum.GstCtx.rsp;
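     /* In 64-bit mode the stack pointer is aligned on a 16-byte boundary before the frame is pushed. */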
5392 uNewRsp &= ~(uint64_t)0xf;
5393
5394 /*
5395 * Calc the flag image to push.
5396 */
5397 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5398 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5399 fEfl &= ~X86_EFL_RF;
5400 else
5401 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5402
5403 /*
5404 * Start making changes.
5405 */
5406 /* Set the new CPL so that stack accesses use it. */
5407 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5408 pVCpu->iem.s.uCpl = uNewCpl;
5409
5410 /* Create the stack frame. */
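     /* RIP, CS, RFLAGS, RSP and SS are always pushed (5 qwords), plus an optional error code. */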
5411 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5412 RTPTRUNION uStackFrame;
5413 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5414 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5415 if (rcStrict != VINF_SUCCESS)
5416 return rcStrict;
5417 void * const pvStackFrame = uStackFrame.pv;
5418
5419 if (fFlags & IEM_XCPT_FLAGS_ERR)
5420 *uStackFrame.pu64++ = uErr;
5421 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5422 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5423 uStackFrame.pu64[2] = fEfl;
5424 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5425 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5426 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5427 if (rcStrict != VINF_SUCCESS)
5428 return rcStrict;
5429
5430     /* Mark the CS selector 'accessed' (hope this is the correct time). */
5431     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5432 * after pushing the stack frame? (Write protect the gdt + stack to
5433 * find out.) */
5434 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5435 {
5436 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5437 if (rcStrict != VINF_SUCCESS)
5438 return rcStrict;
5439 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5440 }
5441
5442 /*
5443      * Start committing the register changes.
5444 */
5445 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5446 * hidden registers when interrupting 32-bit or 16-bit code! */
5447 if (uNewCpl != uOldCpl)
5448 {
5449 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5450 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5451 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5452 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5453 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5454 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5455 }
5456 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5457 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5458 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5459 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5460 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5461 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5462 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5463 pVCpu->cpum.GstCtx.rip = uNewRip;
5464
5465 fEfl &= ~fEflToClear;
5466 IEMMISC_SET_EFL(pVCpu, fEfl);
5467
5468 if (fFlags & IEM_XCPT_FLAGS_CR2)
5469 pVCpu->cpum.GstCtx.cr2 = uCr2;
5470
5471 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5472 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5473
5474 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5475}
5476
5477
5478/**
5479 * Implements exceptions and interrupts.
5480 *
5481 * All exceptions and interrupts go through this function!
5482 *
5483 * @returns VBox strict status code.
5484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5485 * @param cbInstr The number of bytes to offset rIP by in the return
5486 * address.
5487 * @param u8Vector The interrupt / exception vector number.
5488 * @param fFlags The flags.
5489 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5490 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5491 */
5492DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5493iemRaiseXcptOrInt(PVMCPU pVCpu,
5494 uint8_t cbInstr,
5495 uint8_t u8Vector,
5496 uint32_t fFlags,
5497 uint16_t uErr,
5498 uint64_t uCr2)
5499{
5500 /*
5501 * Get all the state that we might need here.
5502 */
5503 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5504 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5505
5506#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5507 /*
5508 * Flush prefetch buffer
5509 */
5510 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5511#endif
5512
5513 /*
5514 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5515 */
5516 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5517 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5518 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5519 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5520 {
5521 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5522 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5523 u8Vector = X86_XCPT_GP;
5524 uErr = 0;
5525 }
5526#ifdef DBGFTRACE_ENABLED
5527 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5528 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5530#endif
5531
5532#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5533 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5534 {
5535 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5536 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5537 return rcStrict0;
5538 }
5539#endif
5540
5541#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5542 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5543 {
5544 /*
5545 * If the event is being injected as part of VMRUN, it isn't subject to event
5546 * intercepts in the nested-guest. However, secondary exceptions that occur
5547 * during injection of any event -are- subject to exception intercepts.
5548 *
5549 * See AMD spec. 15.20 "Event Injection".
5550 */
5551 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5552 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5553 else
5554 {
5555 /*
5556 * Check and handle if the event being raised is intercepted.
5557 */
5558 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5559 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5560 return rcStrict0;
5561 }
5562 }
5563#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5564
5565 /*
5566 * Do recursion accounting.
5567 */
5568 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5569 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5570 if (pVCpu->iem.s.cXcptRecursions == 0)
5571 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5572 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5573 else
5574 {
5575 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5576 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5577 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5578
5579 if (pVCpu->iem.s.cXcptRecursions >= 4)
5580 {
5581#ifdef DEBUG_bird
5582 AssertFailed();
5583#endif
5584 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5585 }
5586
5587 /*
5588 * Evaluate the sequence of recurring events.
5589 */
5590 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5591 NULL /* pXcptRaiseInfo */);
5592 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5593 { /* likely */ }
5594 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5595 {
5596 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5597 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5598 u8Vector = X86_XCPT_DF;
5599 uErr = 0;
5600 /** @todo NSTVMX: Do we need to do something here for VMX? */
5601 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5602 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5603 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5604 }
5605 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5606 {
5607 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5608 return iemInitiateCpuShutdown(pVCpu);
5609 }
5610 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5611 {
5612 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5613 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5614 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5615 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5616 return VERR_EM_GUEST_CPU_HANG;
5617 }
5618 else
5619 {
5620 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5621 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5622 return VERR_IEM_IPE_9;
5623 }
5624
5625 /*
5626              * The 'EXT' bit is set when an exception occurs during delivery of an external
5627              * event (such as an interrupt or an earlier exception)[1]. The privileged software
5628              * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5629              * INT n, INTO and INT3 instructions, the 'EXT' bit is not set[3].
5630 *
5631 * [1] - Intel spec. 6.13 "Error Code"
5632 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5633 * [3] - Intel Instruction reference for INT n.
5634 */
5635 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5636 && (fFlags & IEM_XCPT_FLAGS_ERR)
5637 && u8Vector != X86_XCPT_PF
5638 && u8Vector != X86_XCPT_DF)
5639 {
5640 uErr |= X86_TRAP_ERR_EXTERNAL;
5641 }
5642 }
5643
5644 pVCpu->iem.s.cXcptRecursions++;
5645 pVCpu->iem.s.uCurXcpt = u8Vector;
5646 pVCpu->iem.s.fCurXcpt = fFlags;
5647 pVCpu->iem.s.uCurXcptErr = uErr;
5648 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5649
5650 /*
5651 * Extensive logging.
5652 */
5653#if defined(LOG_ENABLED) && defined(IN_RING3)
5654 if (LogIs3Enabled())
5655 {
5656 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5657 PVM pVM = pVCpu->CTX_SUFF(pVM);
5658 char szRegs[4096];
5659 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5660 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5661 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5662 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5663 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5664 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5665 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5666 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5667 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5668 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5669 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5670 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5671 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5672 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5673 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5674 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5675 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5676 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5677 " efer=%016VR{efer}\n"
5678 " pat=%016VR{pat}\n"
5679 " sf_mask=%016VR{sf_mask}\n"
5680 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5681 " lstar=%016VR{lstar}\n"
5682 " star=%016VR{star} cstar=%016VR{cstar}\n"
5683 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5684 );
5685
5686 char szInstr[256];
5687 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5688 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5689 szInstr, sizeof(szInstr), NULL);
5690 Log3(("%s%s\n", szRegs, szInstr));
5691 }
5692#endif /* LOG_ENABLED */
5693
5694 /*
5695 * Call the mode specific worker function.
5696 */
5697 VBOXSTRICTRC rcStrict;
5698 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5699 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5700 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5701 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5702 else
5703 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5704
5705 /* Flush the prefetch buffer. */
5706#ifdef IEM_WITH_CODE_TLB
5707 pVCpu->iem.s.pbInstrBuf = NULL;
5708#else
5709 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5710#endif
5711
5712 /*
5713 * Unwind.
5714 */
5715 pVCpu->iem.s.cXcptRecursions--;
5716 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5717 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5718 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5719 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5720 pVCpu->iem.s.cXcptRecursions + 1));
5721 return rcStrict;
5722}
5723
5724#ifdef IEM_WITH_SETJMP
5725/**
5726 * See iemRaiseXcptOrInt. Will not return.
5727 */
5728IEM_STATIC DECL_NO_RETURN(void)
5729iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5730 uint8_t cbInstr,
5731 uint8_t u8Vector,
5732 uint32_t fFlags,
5733 uint16_t uErr,
5734 uint64_t uCr2)
5735{
5736 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5737 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5738}
5739#endif
5740
5741
5742/** \#DE - 00. */
5743DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5744{
5745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5746}
5747
5748
5749/** \#DB - 01.
5750 * @note This automatically clears DR7.GD. */
5751DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5752{
5753 /** @todo set/clear RF. */
5754 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5755 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5756}
5757
5758
5759/** \#BR - 05. */
5760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5761{
5762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5763}
5764
5765
5766/** \#UD - 06. */
5767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5768{
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5770}
5771
5772
5773/** \#NM - 07. */
5774DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5775{
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5777}
5778
5779
5780/** \#TS(err) - 0a. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5782{
5783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5784}
5785
5786
5787/** \#TS(tr) - 0a. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5789{
5790 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5791 pVCpu->cpum.GstCtx.tr.Sel, 0);
5792}
5793
5794
5795/** \#TS(0) - 0a. */
5796DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5797{
5798 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5799 0, 0);
5800}
5801
5802
5803/** \#TS(sel) - 0a. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5807 uSel & X86_SEL_MASK_OFF_RPL, 0);
5808}
5809
5810
5811/** \#NP(err) - 0b. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5815}
5816
5817
5818/** \#NP(sel) - 0b. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5820{
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5822 uSel & ~X86_SEL_RPL, 0);
5823}
5824
5825
5826/** \#SS(seg) - 0c. */
5827DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5828{
5829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5830 uSel & ~X86_SEL_RPL, 0);
5831}
5832
5833
5834/** \#SS(err) - 0c. */
5835DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5836{
5837 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5838}
5839
5840
5841/** \#GP(n) - 0d. */
5842DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5843{
5844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5845}
5846
5847
5848/** \#GP(0) - 0d. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5850{
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5852}
5853
5854#ifdef IEM_WITH_SETJMP
5855/** \#GP(0) - 0d. */
5856DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5857{
5858 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5859}
5860#endif
5861
5862
5863/** \#GP(sel) - 0d. */
5864DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5865{
5866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5867 Sel & ~X86_SEL_RPL, 0);
5868}
5869
5870
5871/** \#GP(0) - 0d. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5873{
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5875}
5876
5877
5878/** \#GP(sel) - 0d. */
5879DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5880{
5881 NOREF(iSegReg); NOREF(fAccess);
5882 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5883 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5884}
5885
5886#ifdef IEM_WITH_SETJMP
5887/** \#GP(sel) - 0d, longjmp. */
5888DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5889{
5890 NOREF(iSegReg); NOREF(fAccess);
5891 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5892 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5893}
5894#endif
5895
5896/** \#GP(sel) - 0d. */
5897DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5898{
5899 NOREF(Sel);
5900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5901}
5902
5903#ifdef IEM_WITH_SETJMP
5904/** \#GP(sel) - 0d, longjmp. */
5905DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5906{
5907 NOREF(Sel);
5908 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5909}
5910#endif
5911
5912
5913/** \#GP(sel) - 0d. */
5914DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5915{
5916 NOREF(iSegReg); NOREF(fAccess);
5917 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5918}
5919
5920#ifdef IEM_WITH_SETJMP
5921/** \#GP(sel) - 0d, longjmp. */
5922DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5923 uint32_t fAccess)
5924{
5925 NOREF(iSegReg); NOREF(fAccess);
5926 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5927}
5928#endif
5929
5930
5931/** \#PF(n) - 0e. */
5932DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5933{
5934 uint16_t uErr;
5935 switch (rc)
5936 {
5937 case VERR_PAGE_NOT_PRESENT:
5938 case VERR_PAGE_TABLE_NOT_PRESENT:
5939 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5940 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5941 uErr = 0;
5942 break;
5943
5944 default:
5945 AssertMsgFailed(("%Rrc\n", rc));
5946 RT_FALL_THRU();
5947 case VERR_ACCESS_DENIED:
5948 uErr = X86_TRAP_PF_P;
5949 break;
5950
5951 /** @todo reserved */
5952 }
5953
5954 if (pVCpu->iem.s.uCpl == 3)
5955 uErr |= X86_TRAP_PF_US;
5956
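     /* The instruction-fetch bit is only reported when no-execute paging is in effect (CR4.PAE and EFER.NXE). */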
5957 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5958 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5959 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5960 uErr |= X86_TRAP_PF_ID;
5961
5962#if 0 /* This is so much nonsense, really. Why was it done like that? */
5963 /* Note! RW access callers reporting a WRITE protection fault, will clear
5964 the READ flag before calling. So, read-modify-write accesses (RW)
5965 can safely be reported as READ faults. */
5966 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5967 uErr |= X86_TRAP_PF_RW;
5968#else
5969 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5970 {
5971 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5972 uErr |= X86_TRAP_PF_RW;
5973 }
5974#endif
5975
5976 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5977 uErr, GCPtrWhere);
5978}
5979
5980#ifdef IEM_WITH_SETJMP
5981/** \#PF(n) - 0e, longjmp. */
5982IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5983{
5984 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5985}
5986#endif
5987
5988
5989/** \#MF(0) - 10. */
5990DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5991{
5992 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5993}
5994
5995
5996/** \#AC(0) - 11. */
5997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5998{
5999 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6000}
6001
6002
6003/**
6004 * Macro for calling iemCImplRaiseDivideError().
6005 *
6006 * This enables us to add/remove arguments and force different levels of
6007 * inlining as we wish.
6008 *
6009 * @return Strict VBox status code.
6010 */
6011#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6012IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6013{
6014 NOREF(cbInstr);
6015 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6016}
6017
6018
6019/**
6020 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6021 *
6022 * This enables us to add/remove arguments and force different levels of
6023 * inlining as we wish.
6024 *
6025 * @returns Strict VBox status code.
6026 */
6027#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6028IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6029{
6030 NOREF(cbInstr);
6031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6032}
6033
6034
6035/**
6036 * Macro for calling iemCImplRaiseInvalidOpcode().
6037 *
6038 * This enables us to add/remove arguments and force different levels of
6039 * inlining as we wish.
6040 *
6041 * @returns Strict VBox status code.
6042 */
6043#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6044IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6045{
6046 NOREF(cbInstr);
6047 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6048}
6049
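/*
 * A minimal illustration sketch of how the deferral macros above are used from
 * an opcode decoder: the decoder simply returns the macro, deferring the actual
 * exception raising to the C implementation.  The decoder name is hypothetical;
 * kept under "#if 0" as it is for illustration only.
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleInvalidEncoding)
{
    return IEMOP_RAISE_INVALID_OPCODE();
}
#endif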
6050
6051/** @} */
6052
6053
6054/*
6055 *
6056 * Helper routines.
6057 * Helper routines.
6058 * Helper routines.
6059 *
6060 */
6061
6062/**
6063 * Recalculates the effective operand size.
6064 *
6065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6066 */
6067IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6068{
6069 switch (pVCpu->iem.s.enmCpuMode)
6070 {
6071 case IEMMODE_16BIT:
6072 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6073 break;
6074 case IEMMODE_32BIT:
6075 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6076 break;
6077 case IEMMODE_64BIT:
6078 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6079 {
6080 case 0:
6081 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6082 break;
6083 case IEM_OP_PRF_SIZE_OP:
6084 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6085 break;
6086 case IEM_OP_PRF_SIZE_REX_W:
6087 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6088 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6089 break;
6090 }
6091 break;
6092 default:
6093 AssertFailed();
6094 }
6095}
6096
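/*
 * A minimal illustration sketch of the prefix handling above in 64-bit mode:
 * 0x66 alone selects 16-bit operands, while REX.W overrides 0x66 and selects
 * 64-bit operands.  The helper name is hypothetical; illustration only.
 */
#if 0
static void iemExampleEffOpSize64(PVMCPU pVCpu)
{
    Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_OP;                            /* 0x66 only. */
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP;    /* REX.W wins over 0x66. */
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT);
}
#endif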
6097
6098/**
6099 * Sets the default operand size to 64-bit and recalculates the effective
6100 * operand size.
6101 *
6102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6103 */
6104IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6105{
6106 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6107 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6108 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6109 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6110 else
6111 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6112}
6113
6114
6115/*
6116 *
6117 * Common opcode decoders.
6118 * Common opcode decoders.
6119 * Common opcode decoders.
6120 *
6121 */
6122//#include <iprt/mem.h>
6123
6124/**
6125 * Used to add extra details about a stub case.
6126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6127 */
6128IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6129{
6130#if defined(LOG_ENABLED) && defined(IN_RING3)
6131 PVM pVM = pVCpu->CTX_SUFF(pVM);
6132 char szRegs[4096];
6133 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6134 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6135 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6136 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6137 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6138 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6139 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6140 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6141 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6142 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6143 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6144 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6145 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6146 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6147 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6148 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6149 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6150 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6151 " efer=%016VR{efer}\n"
6152 " pat=%016VR{pat}\n"
6153 " sf_mask=%016VR{sf_mask}\n"
6154 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6155 " lstar=%016VR{lstar}\n"
6156 " star=%016VR{star} cstar=%016VR{cstar}\n"
6157 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6158 );
6159
6160 char szInstr[256];
6161 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6162 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6163 szInstr, sizeof(szInstr), NULL);
6164
6165 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6166#else
6167    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6168#endif
6169}
6170
6171/**
6172 * Complains about a stub.
6173 *
6174 * Providing two versions of this macro, one for daily use and one for use when
6175 * working on IEM.
6176 */
6177#if 0
6178# define IEMOP_BITCH_ABOUT_STUB() \
6179 do { \
6180 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6181 iemOpStubMsg2(pVCpu); \
6182 RTAssertPanic(); \
6183 } while (0)
6184#else
6185# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6186#endif
6187
6188/** Stubs an opcode. */
6189#define FNIEMOP_STUB(a_Name) \
6190 FNIEMOP_DEF(a_Name) \
6191 { \
6192 RT_NOREF_PV(pVCpu); \
6193 IEMOP_BITCH_ABOUT_STUB(); \
6194 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6195 } \
6196 typedef int ignore_semicolon
6197
6198/** Stubs an opcode. */
6199#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6200 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6201 { \
6202 RT_NOREF_PV(pVCpu); \
6203 RT_NOREF_PV(a_Name0); \
6204 IEMOP_BITCH_ABOUT_STUB(); \
6205 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6206 } \
6207 typedef int ignore_semicolon
6208
6209/** Stubs an opcode which currently should raise \#UD. */
6210#define FNIEMOP_UD_STUB(a_Name) \
6211 FNIEMOP_DEF(a_Name) \
6212 { \
6213 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6214 return IEMOP_RAISE_INVALID_OPCODE(); \
6215 } \
6216 typedef int ignore_semicolon
6217
6218/** Stubs an opcode which currently should raise \#UD. */
6219#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6220 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6221 { \
6222 RT_NOREF_PV(pVCpu); \
6223 RT_NOREF_PV(a_Name0); \
6224 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6225 return IEMOP_RAISE_INVALID_OPCODE(); \
6226 } \
6227 typedef int ignore_semicolon
6228
6229
6230
6231/** @name Register Access.
6232 * @{
6233 */
6234
6235/**
6236 * Gets a reference (pointer) to the specified hidden segment register.
6237 *
6238 * @returns Hidden register reference.
6239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6240 * @param iSegReg The segment register.
6241 */
6242IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6243{
6244 Assert(iSegReg < X86_SREG_COUNT);
6245 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6246 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6247
6248#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6249 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6250 { /* likely */ }
6251 else
6252 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6253#else
6254 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6255#endif
6256 return pSReg;
6257}
6258
6259
6260/**
6261 * Ensures that the given hidden segment register is up to date.
6262 *
6263 * @returns Hidden register reference.
6264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6265 * @param pSReg The segment register.
6266 */
6267IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6268{
6269#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6270 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6271 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6272#else
6273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6274 NOREF(pVCpu);
6275#endif
6276 return pSReg;
6277}
6278
6279
6280/**
6281 * Gets a reference (pointer) to the specified segment register (the selector
6282 * value).
6283 *
6284 * @returns Pointer to the selector variable.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iSegReg The segment register.
6287 */
6288DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6289{
6290 Assert(iSegReg < X86_SREG_COUNT);
6291 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6292 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6293}
6294
6295
6296/**
6297 * Fetches the selector value of a segment register.
6298 *
6299 * @returns The selector value.
6300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6301 * @param iSegReg The segment register.
6302 */
6303DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6304{
6305 Assert(iSegReg < X86_SREG_COUNT);
6306 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6307 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6308}
6309
6310
6311/**
6312 * Fetches the base address value of a segment register.
6313 *
6314 * @returns The segment base address.
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 * @param iSegReg The segment register.
6317 */
6318DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6319{
6320 Assert(iSegReg < X86_SREG_COUNT);
6321 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6322 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6323}
6324
6325
6326/**
6327 * Gets a reference (pointer) to the specified general purpose register.
6328 *
6329 * @returns Register reference.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iReg The general purpose register.
6332 */
6333DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6334{
6335 Assert(iReg < 16);
6336 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6337}
6338
6339
6340/**
6341 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6342 *
6343 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6344 *
6345 * @returns Register reference.
6346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6347 * @param iReg The register.
6348 */
6349DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6350{
6351 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6352 {
6353 Assert(iReg < 16);
6354 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6355 }
6356 /* high 8-bit register. */
6357 Assert(iReg < 8);
6358 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6359}
6360
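/*
 * A minimal illustration sketch of the 8-bit register encoding handled above:
 * without a REX prefix, encodings 4..7 select AH/CH/DH/BH, i.e. the high byte
 * of registers 0..3.  The helper name is hypothetical; illustration only.
 */
#if 0
static void iemExampleHighByteRegs(PVMCPU pVCpu)
{
    Assert(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX));
    Assert(iemGRegRefU8(pVCpu, 0) == &pVCpu->cpum.GstCtx.aGRegs[0].u8);  /* encoding 0 = AL */
    Assert(iemGRegRefU8(pVCpu, 4) == &pVCpu->cpum.GstCtx.aGRegs[0].bHi); /* encoding 4 = AH */
    Assert(iemGRegRefU8(pVCpu, 7) == &pVCpu->cpum.GstCtx.aGRegs[3].bHi); /* encoding 7 = BH */
}
#endif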
6361
6362/**
6363 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6364 *
6365 * @returns Register reference.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6373}
6374
6375
6376/**
6377 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6378 *
6379 * @returns Register reference.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6387}
6388
6389
6390/**
6391 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6392 *
6393 * @returns Register reference.
6394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6395 * @param iReg The register.
6396 */
6397DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6398{
6399    Assert(iReg < 16);
6400 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6401}
6402
6403
6404/**
6405 * Gets a reference (pointer) to the specified segment register's base address.
6406 *
6407 * @returns Segment register base address reference.
6408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6409 * @param iSegReg The segment selector.
6410 */
6411DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6412{
6413 Assert(iSegReg < X86_SREG_COUNT);
6414 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6415 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6416}
6417
6418
6419/**
6420 * Fetches the value of an 8-bit general purpose register.
6421 *
6422 * @returns The register value.
6423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6424 * @param iReg The register.
6425 */
6426DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6427{
6428 return *iemGRegRefU8(pVCpu, iReg);
6429}
6430
6431
6432/**
6433 * Fetches the value of a 16-bit general purpose register.
6434 *
6435 * @returns The register value.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param iReg The register.
6438 */
6439DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6440{
6441 Assert(iReg < 16);
6442 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6443}
6444
6445
6446/**
6447 * Fetches the value of a 32-bit general purpose register.
6448 *
6449 * @returns The register value.
6450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6451 * @param iReg The register.
6452 */
6453DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6454{
6455 Assert(iReg < 16);
6456 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6457}
6458
6459
6460/**
6461 * Fetches the value of a 64-bit general purpose register.
6462 *
6463 * @returns The register value.
6464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6465 * @param iReg The register.
6466 */
6467DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6468{
6469 Assert(iReg < 16);
6470 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6471}
6472
6473
6474/**
6475 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6476 *
6477 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6478 * segment limit.
6479 *
6480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6481 * @param offNextInstr The offset of the next instruction.
6482 */
6483IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6484{
6485 switch (pVCpu->iem.s.enmEffOpSize)
6486 {
6487 case IEMMODE_16BIT:
6488 {
6489 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6490 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6491 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6492 return iemRaiseGeneralProtectionFault0(pVCpu);
6493 pVCpu->cpum.GstCtx.rip = uNewIp;
6494 break;
6495 }
6496
6497 case IEMMODE_32BIT:
6498 {
6499 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6500 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6501
6502 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6503 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6504 return iemRaiseGeneralProtectionFault0(pVCpu);
6505 pVCpu->cpum.GstCtx.rip = uNewEip;
6506 break;
6507 }
6508
6509 case IEMMODE_64BIT:
6510 {
6511 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6512
6513 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6514 if (!IEM_IS_CANONICAL(uNewRip))
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pVCpu->cpum.GstCtx.rip = uNewRip;
6517 break;
6518 }
6519
6520 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6521 }
6522
6523 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6524
6525#ifndef IEM_WITH_CODE_TLB
6526 /* Flush the prefetch buffer. */
6527 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6528#endif
6529
6530 return VINF_SUCCESS;
6531}
6532
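/*
 * A worked example of the 16-bit case above, with made-up values: the new IP
 * wraps within 64K before the CS limit check.  The helper name is
 * hypothetical; illustration only.
 */
#if 0
static uint16_t iemExampleRelJmp16(void)
{
    uint16_t uIp          = UINT16_C(0xfffe);   /* current IP                */
    uint8_t  cbInstr      = 3;                  /* length of the jump instr. */
    int8_t   offNextInstr = -5;                 /* signed 8-bit displacement */
    /* 0xfffe + 3 - 5 wraps to 0xfffc; taken only if 0xfffc <= CS.limit, else #GP(0). */
    return (uint16_t)(uIp + cbInstr + offNextInstr);
}
#endif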
6533
6534/**
6535 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6536 *
6537 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6538 * segment limit.
6539 *
6540 * @returns Strict VBox status code.
6541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6542 * @param offNextInstr The offset of the next instruction.
6543 */
6544IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6545{
6546 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6547
6548 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6549 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6550 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6551 return iemRaiseGeneralProtectionFault0(pVCpu);
6552 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6553 pVCpu->cpum.GstCtx.rip = uNewIp;
6554 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6555
6556#ifndef IEM_WITH_CODE_TLB
6557 /* Flush the prefetch buffer. */
6558 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6559#endif
6560
6561 return VINF_SUCCESS;
6562}
6563
6564
6565/**
6566 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6567 *
6568 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6569 * segment limit.
6570 *
6571 * @returns Strict VBox status code.
6572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6573 * @param offNextInstr The offset of the next instruction.
6574 */
6575IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6576{
6577 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6578
6579 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6580 {
6581 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6582
6583 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6584 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6585 return iemRaiseGeneralProtectionFault0(pVCpu);
6586 pVCpu->cpum.GstCtx.rip = uNewEip;
6587 }
6588 else
6589 {
6590 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6591
6592 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6593 if (!IEM_IS_CANONICAL(uNewRip))
6594 return iemRaiseGeneralProtectionFault0(pVCpu);
6595 pVCpu->cpum.GstCtx.rip = uNewRip;
6596 }
6597 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6598
6599#ifndef IEM_WITH_CODE_TLB
6600 /* Flush the prefetch buffer. */
6601 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6602#endif
6603
6604 return VINF_SUCCESS;
6605}
6606
6607
6608/**
6609 * Performs a near jump to the specified address.
6610 *
6611 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6612 * segment limit.
6613 *
6614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6615 * @param uNewRip The new RIP value.
6616 */
6617IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6618{
6619 switch (pVCpu->iem.s.enmEffOpSize)
6620 {
6621 case IEMMODE_16BIT:
6622 {
6623 Assert(uNewRip <= UINT16_MAX);
6624 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6625 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6626 return iemRaiseGeneralProtectionFault0(pVCpu);
6627 /** @todo Test 16-bit jump in 64-bit mode. */
6628 pVCpu->cpum.GstCtx.rip = uNewRip;
6629 break;
6630 }
6631
6632 case IEMMODE_32BIT:
6633 {
6634 Assert(uNewRip <= UINT32_MAX);
6635 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6636 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6637
6638 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6639 return iemRaiseGeneralProtectionFault0(pVCpu);
6640 pVCpu->cpum.GstCtx.rip = uNewRip;
6641 break;
6642 }
6643
6644 case IEMMODE_64BIT:
6645 {
6646 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6647
6648 if (!IEM_IS_CANONICAL(uNewRip))
6649 return iemRaiseGeneralProtectionFault0(pVCpu);
6650 pVCpu->cpum.GstCtx.rip = uNewRip;
6651 break;
6652 }
6653
6654 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6655 }
6656
6657 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6658
6659#ifndef IEM_WITH_CODE_TLB
6660 /* Flush the prefetch buffer. */
6661 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6662#endif
6663
6664 return VINF_SUCCESS;
6665}
6666
6667
6668/**
6669 * Gets the address of the top of the stack.
6670 *
6671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6672 */
6673DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6674{
6675 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6676 return pVCpu->cpum.GstCtx.rsp;
6677 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6678 return pVCpu->cpum.GstCtx.esp;
6679 return pVCpu->cpum.GstCtx.sp;
6680}
6681
6682
6683/**
6684 * Updates the RIP/EIP/IP to point to the next instruction.
6685 *
6686 * This function leaves the EFLAGS.RF flag alone.
6687 *
6688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6689 * @param cbInstr The number of bytes to add.
6690 */
6691IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6692{
6693 switch (pVCpu->iem.s.enmCpuMode)
6694 {
6695 case IEMMODE_16BIT:
6696 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6697 pVCpu->cpum.GstCtx.eip += cbInstr;
6698 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6699 break;
6700
6701 case IEMMODE_32BIT:
6702 pVCpu->cpum.GstCtx.eip += cbInstr;
6703 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6704 break;
6705
6706 case IEMMODE_64BIT:
6707 pVCpu->cpum.GstCtx.rip += cbInstr;
6708 break;
6709 default: AssertFailed();
6710 }
6711}
6712
6713
6714#if 0
6715/**
6716 * Updates the RIP/EIP/IP to point to the next instruction.
6717 *
6718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6719 */
6720IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6721{
6722 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6723}
6724#endif
6725
6726
6727
6728/**
6729 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6730 *
6731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6732 * @param cbInstr The number of bytes to add.
6733 */
6734IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6735{
6736 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6737
6738 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6739#if ARCH_BITS >= 64
6740 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6741 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6742 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6743#else
6744 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6745 pVCpu->cpum.GstCtx.rip += cbInstr;
6746 else
6747 pVCpu->cpum.GstCtx.eip += cbInstr;
6748#endif
6749}
6750
6751
6752/**
6753 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6754 *
6755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6756 */
6757IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6758{
6759 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6760}
6761
6762
6763/**
6764 * Adds to the stack pointer.
6765 *
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param cbToAdd The number of bytes to add (8-bit!).
6768 */
6769DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6770{
6771 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6772 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6773 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6774 pVCpu->cpum.GstCtx.esp += cbToAdd;
6775 else
6776 pVCpu->cpum.GstCtx.sp += cbToAdd;
6777}
6778
6779
6780/**
6781 * Subtracts from the stack pointer.
6782 *
6783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6784 * @param cbToSub The number of bytes to subtract (8-bit!).
6785 */
6786DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6787{
6788 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6789 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6790 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6791 pVCpu->cpum.GstCtx.esp -= cbToSub;
6792 else
6793 pVCpu->cpum.GstCtx.sp -= cbToSub;
6794}
6795
6796
6797/**
6798 * Adds to the temporary stack pointer.
6799 *
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6802 * @param cbToAdd The number of bytes to add (16-bit).
6803 */
6804DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6805{
6806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6807 pTmpRsp->u += cbToAdd;
6808 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6809 pTmpRsp->DWords.dw0 += cbToAdd;
6810 else
6811 pTmpRsp->Words.w0 += cbToAdd;
6812}
6813
6814
6815/**
6816 * Subtracts from the temporary stack pointer.
6817 *
6818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6819 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6820 * @param cbToSub The number of bytes to subtract.
6821 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6822 * expecting that.
6823 */
6824DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6825{
6826 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6827 pTmpRsp->u -= cbToSub;
6828 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6829 pTmpRsp->DWords.dw0 -= cbToSub;
6830 else
6831 pTmpRsp->Words.w0 -= cbToSub;
6832}
6833
6834
6835/**
6836 * Calculates the effective stack address for a push of the specified size as
6837 * well as the new RSP value (upper bits may be masked).
6838 *
6839 * @returns Effective stack address for the push.
6840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6841 * @param   cbItem              The size of the stack item to push.
6842 * @param puNewRsp Where to return the new RSP value.
6843 */
6844DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6845{
6846 RTUINT64U uTmpRsp;
6847 RTGCPTR GCPtrTop;
6848 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6849
6850 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6851 GCPtrTop = uTmpRsp.u -= cbItem;
6852 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6853 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6854 else
6855 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6856 *puNewRsp = uTmpRsp.u;
6857 return GCPtrTop;
6858}
6859
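/*
 * A minimal illustration sketch of the 16-bit stack segment case above: only
 * SP is decremented and the upper bits of the (temporary) RSP are preserved.
 * The helper name and the RSP value are made up; illustration only.
 */
#if 0
static void iemExamplePush16BitSs(void)
{
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x0000000012340008);
    RTGCPTR GCPtrTop = uTmpRsp.Words.w0 -= 4;   /* push 4 bytes */
    Assert(GCPtrTop  == 0x0004);
    Assert(uTmpRsp.u == UINT64_C(0x0000000012340004));
}
#endif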
6860
6861/**
6862 * Gets the current stack pointer and calculates the value after a pop of the
6863 * specified size.
6864 *
6865 * @returns Current stack pointer.
6866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6867 * @param cbItem The size of the stack item to pop.
6868 * @param puNewRsp Where to return the new RSP value.
6869 */
6870DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6871{
6872 RTUINT64U uTmpRsp;
6873 RTGCPTR GCPtrTop;
6874 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6875
6876 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6877 {
6878 GCPtrTop = uTmpRsp.u;
6879 uTmpRsp.u += cbItem;
6880 }
6881 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6882 {
6883 GCPtrTop = uTmpRsp.DWords.dw0;
6884 uTmpRsp.DWords.dw0 += cbItem;
6885 }
6886 else
6887 {
6888 GCPtrTop = uTmpRsp.Words.w0;
6889 uTmpRsp.Words.w0 += cbItem;
6890 }
6891 *puNewRsp = uTmpRsp.u;
6892 return GCPtrTop;
6893}
6894
6895
6896/**
6897 * Calculates the effective stack address for a push of the specified size as
6898 * well as the new temporary RSP value (upper bits may be masked).
6899 *
6900 * @returns Effective stack address for the push.
6901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6902 * @param pTmpRsp The temporary stack pointer. This is updated.
6903 * @param   cbItem              The size of the stack item to push.
6904 */
6905DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6906{
6907 RTGCPTR GCPtrTop;
6908
6909 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6910 GCPtrTop = pTmpRsp->u -= cbItem;
6911 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6912 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6913 else
6914 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6915 return GCPtrTop;
6916}
6917
6918
6919/**
6920 * Gets the effective stack address for a pop of the specified size and
6921 * calculates and updates the temporary RSP.
6922 *
6923 * @returns Current stack pointer.
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 * @param pTmpRsp The temporary stack pointer. This is updated.
6926 * @param cbItem The size of the stack item to pop.
6927 */
6928DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6929{
6930 RTGCPTR GCPtrTop;
6931 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6932 {
6933 GCPtrTop = pTmpRsp->u;
6934 pTmpRsp->u += cbItem;
6935 }
6936 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6937 {
6938 GCPtrTop = pTmpRsp->DWords.dw0;
6939 pTmpRsp->DWords.dw0 += cbItem;
6940 }
6941 else
6942 {
6943 GCPtrTop = pTmpRsp->Words.w0;
6944 pTmpRsp->Words.w0 += cbItem;
6945 }
6946 return GCPtrTop;
6947}
6948
6949/** @} */
6950
6951
6952/** @name FPU access and helpers.
6953 *
6954 * @{
6955 */
6956
6957
6958/**
6959 * Hook for preparing to use the host FPU.
6960 *
6961 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6962 *
6963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6964 */
6965DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6966{
6967#ifdef IN_RING3
6968 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6969#else
6970 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6971#endif
6972 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6973}
6974
6975
6976/**
6977 * Hook for preparing to use the host FPU for SSE.
6978 *
6979 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6980 *
6981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6982 */
6983DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6984{
6985 iemFpuPrepareUsage(pVCpu);
6986}
6987
6988
6989/**
6990 * Hook for preparing to use the host FPU for AVX.
6991 *
6992 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6993 *
6994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6995 */
6996DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6997{
6998 iemFpuPrepareUsage(pVCpu);
6999}
7000
7001
7002/**
7003 * Hook for actualizing the guest FPU state before the interpreter reads it.
7004 *
7005 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7006 *
7007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7008 */
7009DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7010{
7011#ifdef IN_RING3
7012 NOREF(pVCpu);
7013#else
7014 CPUMRZFpuStateActualizeForRead(pVCpu);
7015#endif
7016 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7017}
7018
7019
7020/**
7021 * Hook for actualizing the guest FPU state before the interpreter changes it.
7022 *
7023 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 */
7027DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7028{
7029#ifdef IN_RING3
7030 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7031#else
7032 CPUMRZFpuStateActualizeForChange(pVCpu);
7033#endif
7034 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7035}
7036
7037
7038/**
7039 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7040 * only.
7041 *
7042 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7043 *
7044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7045 */
7046DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7047{
7048#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7049 NOREF(pVCpu);
7050#else
7051 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7052#endif
7053 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7054}
7055
7056
7057/**
7058 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7059 * read+write.
7060 *
7061 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7062 *
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 */
7065DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7066{
7067#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7069#else
7070 CPUMRZFpuStateActualizeForChange(pVCpu);
7071#endif
7072 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7073}
7074
7075
7076/**
7077 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7078 * only.
7079 *
7080 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7081 *
7082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7083 */
7084DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7085{
7086#ifdef IN_RING3
7087 NOREF(pVCpu);
7088#else
7089 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7090#endif
7091 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7092}
7093
7094
7095/**
7096 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7097 * read+write.
7098 *
7099 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7100 *
7101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7102 */
7103DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7104{
7105#ifdef IN_RING3
7106 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7107#else
7108 CPUMRZFpuStateActualizeForChange(pVCpu);
7109#endif
7110 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7111}
7112
7113
7114/**
7115 * Stores a QNaN value into a FPU register.
7116 *
7117 * @param pReg Pointer to the register.
7118 */
7119DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7120{
7121 pReg->au32[0] = UINT32_C(0x00000000);
7122 pReg->au32[1] = UINT32_C(0xc0000000);
7123 pReg->au16[4] = UINT16_C(0xffff);
7124}
7125
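/*
 * A minimal illustration sketch of the 80-bit value stored above: sign set, an
 * all-ones exponent and a 0xc000000000000000 mantissa, i.e. the x87 "real
 * indefinite" QNaN.  The helper name is hypothetical; illustration only.
 */
#if 0
static void iemExampleCheckStoredQNan(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au32[0] == UINT32_C(0x00000000));
    Assert(r80.au32[1] == UINT32_C(0xc0000000));    /* integer bit + top fraction bit */
    Assert(r80.au16[4] == UINT16_C(0xffff));        /* sign + all-ones exponent       */
}
#endif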
7126
7127/**
7128 * Updates the FOP, FPU.CS and FPUIP registers.
7129 *
7130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7131 * @param pFpuCtx The FPU context.
7132 */
7133DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7134{
7135 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7136 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7137    /** @todo x87.CS and FPUIP need to be kept separately. */
7138 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7139 {
7140 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7141 * happens in real mode here based on the fnsave and fnstenv images. */
7142 pFpuCtx->CS = 0;
7143 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7144 }
7145 else
7146 {
7147 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7148 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7149 }
7150}
7151
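/*
 * A worked example of the real/V86-mode FPUIP encoding above, with made-up
 * values: CS=0xf000, IP=0x0123 is stored as 0x000f0123.  The helper name is
 * hypothetical; illustration only.
 */
#if 0
static uint32_t iemExampleRealModeFpuIp(uint16_t uCs, uint16_t uIp)
{
    return (uint32_t)uIp | ((uint32_t)uCs << 4);    /* 0xf000:0x0123 -> 0x000f0123 */
}
#endif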
7152
7153/**
7154 * Updates the x87.DS and FPUDP registers.
7155 *
7156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7157 * @param pFpuCtx The FPU context.
7158 * @param iEffSeg The effective segment register.
7159 * @param GCPtrEff The effective address relative to @a iEffSeg.
7160 */
7161DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7162{
7163 RTSEL sel;
7164 switch (iEffSeg)
7165 {
7166 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7167 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7168 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7169 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7170 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7171 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7172 default:
7173 AssertMsgFailed(("%d\n", iEffSeg));
7174 sel = pVCpu->cpum.GstCtx.ds.Sel;
7175 }
7176    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7177 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7178 {
7179 pFpuCtx->DS = 0;
7180 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7181 }
7182 else
7183 {
7184 pFpuCtx->DS = sel;
7185 pFpuCtx->FPUDP = GCPtrEff;
7186 }
7187}
7188
7189
7190/**
7191 * Rotates the stack registers in the push direction.
7192 *
7193 * @param pFpuCtx The FPU context.
7194 * @remarks This is a complete waste of time, but fxsave stores the registers in
7195 * stack order.
7196 */
7197DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7198{
7199 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7200 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7201 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7202 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7203 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7204 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7205 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7206 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7207 pFpuCtx->aRegs[0].r80 = r80Tmp;
7208}
7209
7210
7211/**
7212 * Rotates the stack registers in the pop direction.
7213 *
7214 * @param pFpuCtx The FPU context.
7215 * @remarks This is a complete waste of time, but fxsave stores the registers in
7216 * stack order.
7217 */
7218DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7219{
7220 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7221 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7222 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7223 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7224 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7225 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7226 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7227 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7228 pFpuCtx->aRegs[7].r80 = r80Tmp;
7229}
7230
7231
7232/**
7233 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7234 * exception prevents it.
7235 *
7236 * @param pResult The FPU operation result to push.
7237 * @param pFpuCtx The FPU context.
7238 */
7239IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7240{
7241 /* Update FSW and bail if there are pending exceptions afterwards. */
7242 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7243 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7244 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7245 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7246 {
7247 pFpuCtx->FSW = fFsw;
7248 return;
7249 }
7250
7251 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7252 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7253 {
7254 /* All is fine, push the actual value. */
7255 pFpuCtx->FTW |= RT_BIT(iNewTop);
7256 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7257 }
7258 else if (pFpuCtx->FCW & X86_FCW_IM)
7259 {
7260 /* Masked stack overflow, push QNaN. */
7261 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7262 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7263 }
7264 else
7265 {
7266 /* Raise stack overflow, don't push anything. */
7267 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7268 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7269 return;
7270 }
7271
7272 fFsw &= ~X86_FSW_TOP_MASK;
7273 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7274 pFpuCtx->FSW = fFsw;
7275
7276 iemFpuRotateStackPush(pFpuCtx);
7277}
7278
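/*
 * A minimal illustration sketch of the TOP arithmetic above: adding 7 and
 * masking is a decrement modulo 8, so a push moves TOP 0 -> 7, 3 -> 2, etc.
 * The helper name is hypothetical; illustration only.
 */
#if 0
static uint16_t iemExampleNewTopForPush(uint16_t fFsw)
{
    return (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
}
#endif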
7279
7280/**
7281 * Stores a result in a FPU register and updates the FSW and FTW.
7282 *
7283 * @param pFpuCtx The FPU context.
7284 * @param pResult The result to store.
7285 * @param iStReg Which FPU register to store it in.
7286 */
7287IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7288{
7289 Assert(iStReg < 8);
7290 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7291 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7292 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7293 pFpuCtx->FTW |= RT_BIT(iReg);
7294 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7295}
7296
7297
7298/**
7299 * Only updates the FPU status word (FSW) with the result of the current
7300 * instruction.
7301 *
7302 * @param pFpuCtx The FPU context.
7303 * @param u16FSW The FSW output of the current instruction.
7304 */
7305IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7306{
7307 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7308 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7309}
7310
7311
7312/**
7313 * Pops one item off the FPU stack if no pending exception prevents it.
7314 *
7315 * @param pFpuCtx The FPU context.
7316 */
7317IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7318{
7319 /* Check pending exceptions. */
7320 uint16_t uFSW = pFpuCtx->FSW;
7321 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7322 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7323 return;
7324
7325 /* TOP--. */
7326 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7327 uFSW &= ~X86_FSW_TOP_MASK;
7328 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7329 pFpuCtx->FSW = uFSW;
7330
7331 /* Mark the previous ST0 as empty. */
7332 iOldTop >>= X86_FSW_TOP_SHIFT;
7333 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7334
7335 /* Rotate the registers. */
7336 iemFpuRotateStackPop(pFpuCtx);
7337}
7338
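/*
 * A minimal illustration sketch of the TOP increment above, performed directly
 * inside the FSW TOP field: adding 9 is +1 modulo 8, so TOP 7 wraps to 0.  The
 * helper name and the FSW value are made up; illustration only.
 */
#if 0
static void iemExampleIncTopInFsw(void)
{
    uint16_t uFSW    = 7 << X86_FSW_TOP_SHIFT;      /* TOP = 7, everything else clear */
    uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
    uFSW &= ~X86_FSW_TOP_MASK;
    uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    Assert(X86_FSW_TOP_GET(uFSW) == 0);             /* 7 + 9 = 16 = 0 (mod 8) */
}
#endif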
7339
7340/**
7341 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pResult The FPU operation result to push.
7345 */
7346IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7347{
7348 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7349 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7350 iemFpuMaybePushResult(pResult, pFpuCtx);
7351}
7352
7353
7354/**
7355 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7356 * and sets FPUDP and FPUDS.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pResult The FPU operation result to push.
7360 * @param iEffSeg The effective segment register.
7361 * @param GCPtrEff The effective address relative to @a iEffSeg.
7362 */
7363IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7364{
7365 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7366 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7367 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7368 iemFpuMaybePushResult(pResult, pFpuCtx);
7369}
7370
7371
7372/**
7373 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7374 * unless a pending exception prevents it.
7375 *
7376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7377 * @param pResult The FPU operation result to store and push.
7378 */
7379IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7380{
7381 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7382 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7383
7384 /* Update FSW and bail if there are pending exceptions afterwards. */
7385 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7386 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7387 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7388 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7389 {
7390 pFpuCtx->FSW = fFsw;
7391 return;
7392 }
7393
7394 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7395 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7396 {
7397 /* All is fine, push the actual value. */
7398 pFpuCtx->FTW |= RT_BIT(iNewTop);
7399 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7400 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7401 }
7402 else if (pFpuCtx->FCW & X86_FCW_IM)
7403 {
7404 /* Masked stack overflow, push QNaN. */
7405 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7406 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7407 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7408 }
7409 else
7410 {
7411 /* Raise stack overflow, don't push anything. */
7412 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7413 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7414 return;
7415 }
7416
7417 fFsw &= ~X86_FSW_TOP_MASK;
7418 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7419 pFpuCtx->FSW = fFsw;
7420
7421 iemFpuRotateStackPush(pFpuCtx);
7422}
7423
7424
7425/**
7426 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7427 * FOP.
7428 *
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 * @param pResult The result to store.
7431 * @param iStReg Which FPU register to store it in.
7432 */
7433IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7434{
7435 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7436 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7437 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7438}
7439
7440
7441/**
7442 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7443 * FOP, and then pops the stack.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 * @param pResult The result to store.
7447 * @param iStReg Which FPU register to store it in.
7448 */
7449IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7450{
7451 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7452 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7453 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7454 iemFpuMaybePopOne(pFpuCtx);
7455}
7456
7457
7458/**
7459 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7460 * FPUDP, and FPUDS.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param pResult The result to store.
7464 * @param iStReg Which FPU register to store it in.
7465 * @param iEffSeg The effective memory operand selector register.
7466 * @param GCPtrEff The effective memory operand offset.
7467 */
7468IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7469 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7470{
7471 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7472 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7473 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7474 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7475}
7476
7477
7478/**
7479 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7480 * FPUDP, and FPUDS, and then pops the stack.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param pResult The result to store.
7484 * @param iStReg Which FPU register to store it in.
7485 * @param iEffSeg The effective memory operand selector register.
7486 * @param GCPtrEff The effective memory operand offset.
7487 */
7488IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7489 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7490{
7491 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7492 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7493 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7494 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7495 iemFpuMaybePopOne(pFpuCtx);
7496}
7497
7498
7499/**
7500 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7501 *
7502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7503 */
7504IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7505{
7506 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7507 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7508}
7509
7510
7511/**
7512 * Marks the specified stack register as free (for FFREE).
7513 *
7514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7515 * @param iStReg The register to free.
7516 */
7517IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7518{
7519 Assert(iStReg < 8);
7520 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7521 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7522 pFpuCtx->FTW &= ~RT_BIT(iReg);
7523}
7524
7525
7526/**
7527 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7528 *
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 */
7531IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7532{
7533 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7534 uint16_t uFsw = pFpuCtx->FSW;
7535 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7536 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7537 uFsw &= ~X86_FSW_TOP_MASK;
7538 uFsw |= uTop;
7539 pFpuCtx->FSW = uFsw;
7540}
7541
7542
7543/**
7544 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7545 *
7546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7547 */
7548IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7549{
7550 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7551 uint16_t uFsw = pFpuCtx->FSW;
7552 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7553 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7554 uFsw &= ~X86_FSW_TOP_MASK;
7555 uFsw |= uTop;
7556 pFpuCtx->FSW = uFsw;
7557}
7558
7559
7560/**
7561 * Updates the FSW, FOP, FPUIP, and FPUCS.
7562 *
7563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7564 * @param u16FSW The FSW from the current instruction.
7565 */
7566IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7567{
7568 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7569 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7570 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7571}
7572
7573
7574/**
7575 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7576 *
7577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7578 * @param u16FSW The FSW from the current instruction.
7579 */
7580IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7581{
7582 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7583 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7584 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7585 iemFpuMaybePopOne(pFpuCtx);
7586}
7587
7588
7589/**
7590 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7591 *
7592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7593 * @param u16FSW The FSW from the current instruction.
7594 * @param iEffSeg The effective memory operand selector register.
7595 * @param GCPtrEff The effective memory operand offset.
7596 */
7597IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7598{
7599 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7600 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7601 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7602 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7603}
7604
7605
7606/**
7607 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7608 *
7609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7610 * @param u16FSW The FSW from the current instruction.
7611 */
7612IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7613{
7614 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7616 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7617 iemFpuMaybePopOne(pFpuCtx);
7618 iemFpuMaybePopOne(pFpuCtx);
7619}
7620
7621
7622/**
7623 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7624 *
7625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7626 * @param u16FSW The FSW from the current instruction.
7627 * @param iEffSeg The effective memory operand selector register.
7628 * @param GCPtrEff The effective memory operand offset.
7629 */
7630IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7631{
7632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7635 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
7640/**
7641 * Worker routine for raising an FPU stack underflow exception.
7642 *
7643 * @param pFpuCtx The FPU context.
7644 * @param iStReg The stack register being accessed.
7645 */
7646IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7647{
7648 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7649 if (pFpuCtx->FCW & X86_FCW_IM)
7650 {
7651 /* Masked underflow. */
7652 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7654 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7655 if (iStReg != UINT8_MAX)
7656 {
7657 pFpuCtx->FTW |= RT_BIT(iReg);
7658 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7659 }
7660 }
7661 else
7662 {
7663 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7664 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7665 }
7666}
7667
7668
7669/**
7670 * Raises a FPU stack underflow exception.
7671 *
7672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7673 * @param   iStReg              The destination register that should be loaded
7674 *                              with QNaN if \#IS is masked. Specify
7675 *                              UINT8_MAX if none (like for fcom).
7676 */
7677DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7678{
7679 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7680 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7681 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7682}
7683
7684
7685DECL_NO_INLINE(IEM_STATIC, void)
7686iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7687{
7688 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7689 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7690 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7691 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7692}
7693
7694
7695DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7696{
7697 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7698 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7699 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7700 iemFpuMaybePopOne(pFpuCtx);
7701}
7702
7703
7704DECL_NO_INLINE(IEM_STATIC, void)
7705iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7706{
7707 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7708 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7709 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7710 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7711 iemFpuMaybePopOne(pFpuCtx);
7712}
7713
7714
7715DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7716{
7717 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7718 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7719 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7720 iemFpuMaybePopOne(pFpuCtx);
7721 iemFpuMaybePopOne(pFpuCtx);
7722}
7723
7724
7725DECL_NO_INLINE(IEM_STATIC, void)
7726iemFpuStackPushUnderflow(PVMCPU pVCpu)
7727{
7728 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7729 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7730
7731 if (pFpuCtx->FCW & X86_FCW_IM)
7732 {
7733 /* Masked underflow - Push QNaN. */
7734 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7735 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7736 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7737 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7738 pFpuCtx->FTW |= RT_BIT(iNewTop);
7739 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7740 iemFpuRotateStackPush(pFpuCtx);
7741 }
7742 else
7743 {
7744 /* Exception pending - don't change TOP or the register stack. */
7745 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7746 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7747 }
7748}
7749
7750
7751DECL_NO_INLINE(IEM_STATIC, void)
7752iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7753{
7754 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7755 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7756
7757 if (pFpuCtx->FCW & X86_FCW_IM)
7758 {
7759 /* Masked underflow - Push QNaN. */
7760 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7761 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7762 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7763 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7764 pFpuCtx->FTW |= RT_BIT(iNewTop);
7765 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7766 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7767 iemFpuRotateStackPush(pFpuCtx);
7768 }
7769 else
7770 {
7771 /* Exception pending - don't change TOP or the register stack. */
7772 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7773 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7774 }
7775}
7776
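/*
 * Illustrative sketch (hypothetical, not actual instruction code): a helper
 * for a two-result instruction in the fptan/fsincos style that needs to push
 * a value could report an empty source operand roughly like this:
 *
 *      if (iemFpuStRegNotEmpty(pVCpu, 0) != VINF_SUCCESS)
 *          iemFpuStackPushUnderflowTwo(pVCpu);
 */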
7777
7778/**
7779 * Worker routine for raising an FPU stack overflow exception on a push.
7780 *
7781 * @param pFpuCtx The FPU context.
7782 */
7783IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7784{
7785 if (pFpuCtx->FCW & X86_FCW_IM)
7786 {
7787 /* Masked overflow. */
7788 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7789 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7790 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7791 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7792 pFpuCtx->FTW |= RT_BIT(iNewTop);
7793 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7794 iemFpuRotateStackPush(pFpuCtx);
7795 }
7796 else
7797 {
7798 /* Exception pending - don't change TOP or the register stack. */
7799 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7800 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7801 }
7802}
7803
7804
7805/**
7806 * Raises an FPU stack overflow exception on a push.
7807 *
7808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7809 */
7810DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7811{
7812 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7813 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7814 iemFpuStackPushOverflowOnly(pFpuCtx);
7815}
7816
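/*
 * Illustrative sketch (hypothetical): before pushing a result, an instruction
 * helper could check whether the register that will become the new ST(0),
 * i.e. the current ST(7), is still occupied and raise the overflow:
 *
 *      if (iemFpuStRegNotEmpty(pVCpu, 7) == VINF_SUCCESS)
 *          iemFpuStackPushOverflow(pVCpu);
 */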
7817
7818/**
7819 * Raises an FPU stack overflow exception on a push with a memory operand.
7820 *
7821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7822 * @param iEffSeg The effective memory operand selector register.
7823 * @param GCPtrEff The effective memory operand offset.
7824 */
7825DECL_NO_INLINE(IEM_STATIC, void)
7826iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7827{
7828 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7829 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7830 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7831 iemFpuStackPushOverflowOnly(pFpuCtx);
7832}
7833
7834
7835IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7836{
7837 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7838 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7839 if (pFpuCtx->FTW & RT_BIT(iReg))
7840 return VINF_SUCCESS;
7841 return VERR_NOT_FOUND;
7842}
7843
7844
7845IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7846{
7847 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7848 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7849 if (pFpuCtx->FTW & RT_BIT(iReg))
7850 {
7851 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7852 return VINF_SUCCESS;
7853 }
7854 return VERR_NOT_FOUND;
7855}
7856
7857
7858IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7859 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7860{
7861 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7862 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7863 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7864 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7865 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7866 {
7867 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7868 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7869 return VINF_SUCCESS;
7870 }
7871 return VERR_NOT_FOUND;
7872}
7873
7874
7875IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7876{
7877 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7878 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7879 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7880 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7881 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7882 {
7883 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7884 return VINF_SUCCESS;
7885 }
7886 return VERR_NOT_FOUND;
7887}
7888
7889
7890/**
7891 * Updates the FPU exception status after FCW is changed.
7892 *
7893 * @param pFpuCtx The FPU context.
7894 */
7895IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7896{
7897 uint16_t u16Fsw = pFpuCtx->FSW;
7898 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7899 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7900 else
7901 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7902 pFpuCtx->FSW = u16Fsw;
7903}
7904
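/*
 * Illustrative sketch (hypothetical): after an FLDCW-style update of the
 * control word, the summary bits must be recomputed so that newly unmasked
 * pending exceptions set ES/B again (u16NewFcw is a made-up local here):
 *
 *      pFpuCtx->FCW = u16NewFcw;
 *      iemFpuRecalcExceptionStatus(pFpuCtx);
 */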
7905
7906/**
7907 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7908 *
7909 * @returns The full FTW.
7910 * @param pFpuCtx The FPU context.
7911 */
7912IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7913{
7914 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7915 uint16_t u16Ftw = 0;
7916 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7917 for (unsigned iSt = 0; iSt < 8; iSt++)
7918 {
7919 unsigned const iReg = (iSt + iTop) & 7;
7920 if (!(u8Ftw & RT_BIT(iReg)))
7921 u16Ftw |= 3 << (iReg * 2); /* empty */
7922 else
7923 {
7924 uint16_t uTag;
7925 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7926 if (pr80Reg->s.uExponent == 0x7fff)
7927 uTag = 2; /* Exponent is all 1's => Special. */
7928 else if (pr80Reg->s.uExponent == 0x0000)
7929 {
7930 if (pr80Reg->s.u64Mantissa == 0x0000)
7931 uTag = 1; /* All bits are zero => Zero. */
7932 else
7933 uTag = 2; /* Must be special. */
7934 }
7935 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7936 uTag = 0; /* Valid. */
7937 else
7938 uTag = 2; /* Must be special. */
7939
7940 u16Ftw |= uTag << (iReg * 2);
7941 }
7942 }
7943
7944 return u16Ftw;
7945}
7946
7947
7948/**
7949 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7950 *
7951 * @returns The compressed FTW.
7952 * @param u16FullFtw The full FTW to convert.
7953 */
7954IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7955{
7956 uint8_t u8Ftw = 0;
7957 for (unsigned i = 0; i < 8; i++)
7958 {
7959 if ((u16FullFtw & 3) != 3 /*empty*/)
7960 u8Ftw |= RT_BIT(i);
7961 u16FullFtw >>= 2;
7962 }
7963
7964 return u8Ftw;
7965}
7966
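/*
 * Illustrative sketch (hypothetical) of the two tag word conversions above:
 * FNSTENV/FNSAVE expand the abridged FTW to the full 2-bit-per-register form,
 * while FLDENV/FRSTOR go the other way.  E.g. the tag of ST(0) can be picked
 * out of the full form like this (0=valid, 1=zero, 2=special, 3=empty):
 *
 *      uint16_t const u16FullFtw = iemFpuCalcFullFtw(pFpuCtx);
 *      unsigned const uTagSt0 = (u16FullFtw >> (X86_FSW_TOP_GET(pFpuCtx->FSW) * 2)) & 3;
 *
 * And a full tag word taken from a guest environment image (u16FtwFromImage
 * being a made-up local) would be stored as:
 *
 *      pFpuCtx->FTW = iemFpuCompressFtw(u16FtwFromImage);
 */
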
7967/** @} */
7968
7969
7970/** @name Memory access.
7971 *
7972 * @{
7973 */
7974
7975
7976/**
7977 * Updates the IEMCPU::cbWritten counter if applicable.
7978 *
7979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7980 * @param fAccess The access being accounted for.
7981 * @param cbMem The access size.
7982 */
7983DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7984{
7985 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7986 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7987 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7988}
7989
7990
7991/**
7992 * Checks if the given segment can be written to, raising the appropriate
7993 * exception if not.
7994 *
7995 * @returns VBox strict status code.
7996 *
7997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7998 * @param pHid Pointer to the hidden register.
7999 * @param iSegReg The register number.
8000 * @param pu64BaseAddr Where to return the base address to use for the
8001 * segment. (In 64-bit code it may differ from the
8002 * base in the hidden segment.)
8003 */
8004IEM_STATIC VBOXSTRICTRC
8005iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8006{
8007 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8008
8009 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8010 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8011 else
8012 {
8013 if (!pHid->Attr.n.u1Present)
8014 {
8015 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8016 AssertRelease(uSel == 0);
8017 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8018 return iemRaiseGeneralProtectionFault0(pVCpu);
8019 }
8020
8021 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8022 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8023 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8024 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8025 *pu64BaseAddr = pHid->u64Base;
8026 }
8027 return VINF_SUCCESS;
8028}
8029
8030
8031/**
8032 * Checks if the given segment can be read from, raising the appropriate
8033 * exception if not.
8034 *
8035 * @returns VBox strict status code.
8036 *
8037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8038 * @param pHid Pointer to the hidden register.
8039 * @param iSegReg The register number.
8040 * @param pu64BaseAddr Where to return the base address to use for the
8041 * segment. (In 64-bit code it may differ from the
8042 * base in the hidden segment.)
8043 */
8044IEM_STATIC VBOXSTRICTRC
8045iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8046{
8047 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8048
8049 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8050 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8051 else
8052 {
8053 if (!pHid->Attr.n.u1Present)
8054 {
8055 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8056 AssertRelease(uSel == 0);
8057 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8058 return iemRaiseGeneralProtectionFault0(pVCpu);
8059 }
8060
8061 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8062 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8063 *pu64BaseAddr = pHid->u64Base;
8064 }
8065 return VINF_SUCCESS;
8066}
8067
8068
8069/**
8070 * Applies the segment limit, base and attributes.
8071 *
8072 * This may raise a \#GP or \#SS.
8073 *
8074 * @returns VBox strict status code.
8075 *
8076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8077 * @param fAccess The kind of access which is being performed.
8078 * @param iSegReg The index of the segment register to apply.
8079 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8080 * TSS, ++).
8081 * @param cbMem The access size.
8082 * @param pGCPtrMem Pointer to the guest memory address to apply
8083 * segmentation to. Input and output parameter.
8084 */
8085IEM_STATIC VBOXSTRICTRC
8086iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8087{
8088 if (iSegReg == UINT8_MAX)
8089 return VINF_SUCCESS;
8090
8091 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8092 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8093 switch (pVCpu->iem.s.enmCpuMode)
8094 {
8095 case IEMMODE_16BIT:
8096 case IEMMODE_32BIT:
8097 {
8098 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8099 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8100
8101 if ( pSel->Attr.n.u1Present
8102 && !pSel->Attr.n.u1Unusable)
8103 {
8104 Assert(pSel->Attr.n.u1DescType);
8105 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8106 {
8107 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8108 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8109 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8110
8111 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8112 {
8113 /** @todo CPL check. */
8114 }
8115
8116 /*
8117 * There are two kinds of data selectors, normal and expand down.
8118 */
8119 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8120 {
8121 if ( GCPtrFirst32 > pSel->u32Limit
8122 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8123 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8124 }
8125 else
8126 {
8127 /*
8128 * The upper boundary is defined by the B bit, not the G bit!
8129 */
8130 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8131 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8132 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8133 }
8134 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8135 }
8136 else
8137 {
8138
8139 /*
8140 * A code selector can usually be used to read through; writing is
8141 * only permitted in real and V8086 mode.
8142 */
8143 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8144 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8145 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8146 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8147 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8148
8149 if ( GCPtrFirst32 > pSel->u32Limit
8150 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8151 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8152
8153 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8154 {
8155 /** @todo CPL check. */
8156 }
8157
8158 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8159 }
8160 }
8161 else
8162 return iemRaiseGeneralProtectionFault0(pVCpu);
8163 return VINF_SUCCESS;
8164 }
8165
8166 case IEMMODE_64BIT:
8167 {
8168 RTGCPTR GCPtrMem = *pGCPtrMem;
8169 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8170 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8171
8172 Assert(cbMem >= 1);
8173 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8174 return VINF_SUCCESS;
8175 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8176 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8177 return iemRaiseGeneralProtectionFault0(pVCpu);
8178 }
8179
8180 default:
8181 AssertFailedReturn(VERR_IEM_IPE_7);
8182 }
8183}
8184
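/*
 * Illustrative sketch (hypothetical, not taken from a real caller): applying
 * DS segmentation to a 4 byte read before translating it further, where
 * GCPtrMem stands for some unsegmented guest address:
 *
 *      RTGCPTR GCPtrEff = GCPtrMem;
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                 sizeof(uint32_t), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */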
8185
8186/**
8187 * Translates a virtual address to a physical address and checks if we
8188 * can access the page as specified.
8189 *
8190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8191 * @param GCPtrMem The virtual address.
8192 * @param fAccess The intended access.
8193 * @param pGCPhysMem Where to return the physical address.
8194 */
8195IEM_STATIC VBOXSTRICTRC
8196iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8197{
8198 /** @todo Need a different PGM interface here. We're currently using
8199 * generic / REM interfaces. this won't cut it for R0 & RC. */
8200 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8201 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8202 RTGCPHYS GCPhys;
8203 uint64_t fFlags;
8204 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8205 if (RT_FAILURE(rc))
8206 {
8207 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8208 /** @todo Check unassigned memory in unpaged mode. */
8209 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8210 *pGCPhysMem = NIL_RTGCPHYS;
8211 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8212 }
8213
8214 /* If the page is writable and does not have the no-exec bit set, all
8215 access is allowed. Otherwise we'll have to check more carefully... */
8216 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8217 {
8218 /* Write to read only memory? */
8219 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8220 && !(fFlags & X86_PTE_RW)
8221 && ( (pVCpu->iem.s.uCpl == 3
8222 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8223 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8224 {
8225 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8226 *pGCPhysMem = NIL_RTGCPHYS;
8227 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8228 }
8229
8230 /* Kernel memory accessed by userland? */
8231 if ( !(fFlags & X86_PTE_US)
8232 && pVCpu->iem.s.uCpl == 3
8233 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8234 {
8235 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8236 *pGCPhysMem = NIL_RTGCPHYS;
8237 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8238 }
8239
8240 /* Executing non-executable memory? */
8241 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8242 && (fFlags & X86_PTE_PAE_NX)
8243 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8244 {
8245 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8246 *pGCPhysMem = NIL_RTGCPHYS;
8247 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8248 VERR_ACCESS_DENIED);
8249 }
8250 }
8251
8252 /*
8253 * Set the dirty / access flags.
8254 * ASSUMES this is set when the address is translated rather than on commit...
8255 */
8256 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8257 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8258 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8259 {
8260 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8261 AssertRC(rc2);
8262 }
8263
8264 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8265 *pGCPhysMem = GCPhys;
8266 return VINF_SUCCESS;
8267}
8268
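/*
 * Illustrative sketch (hypothetical caller): translating a segmented address
 * such as the GCPtrEff produced above into a physical one, letting the helper
 * return the page fault status on failure:
 *
 *      RTGCPHYS GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff,
 *                                                                IEM_ACCESS_DATA_R, &GCPhys);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */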
8269
8270
8271/**
8272 * Maps a physical page.
8273 *
8274 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8276 * @param GCPhysMem The physical address.
8277 * @param fAccess The intended access.
8278 * @param ppvMem Where to return the mapping address.
8279 * @param pLock The PGM lock.
8280 */
8281IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8282{
8283#ifdef IEM_LOG_MEMORY_WRITES
8284 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8285 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8286#endif
8287
8288 /** @todo This API may require some improving later. A private deal with PGM
8289 * regarding locking and unlocking needs to be struck. A couple of TLBs
8290 * living in PGM, but with publicly accessible inlined access methods
8291 * could perhaps be an even better solution. */
8292 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8293 GCPhysMem,
8294 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8295 pVCpu->iem.s.fBypassHandlers,
8296 ppvMem,
8297 pLock);
8298 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8299 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8300
8301 return rc;
8302}
8303
8304
8305/**
8306 * Unmap a page previously mapped by iemMemPageMap.
8307 *
8308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8309 * @param GCPhysMem The physical address.
8310 * @param fAccess The intended access.
8311 * @param pvMem What iemMemPageMap returned.
8312 * @param pLock The PGM lock.
8313 */
8314DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8315{
8316 NOREF(pVCpu);
8317 NOREF(GCPhysMem);
8318 NOREF(fAccess);
8319 NOREF(pvMem);
8320 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8321}
8322
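/*
 * Illustrative sketch (hypothetical): iemMemPageMap and iemMemPageUnmap are
 * used as a pair around direct access to the guest page, with the PGM lock
 * handed back on unmap:
 *
 *      PGMPAGEMAPLOCK Lock;
 *      void *pvMem;
 *      int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... read from pvMem ...
 *          iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);
 *      }
 */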
8323
8324/**
8325 * Looks up a memory mapping entry.
8326 *
8327 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8329 * @param pvMem The memory address.
8330 * @param fAccess The intended access.
8331 */
8332DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8333{
8334 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8335 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8336 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8337 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8338 return 0;
8339 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8340 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8341 return 1;
8342 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8343 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8344 return 2;
8345 return VERR_NOT_FOUND;
8346}
8347
8348
8349/**
8350 * Finds a free memmap entry when using iNextMapping doesn't work.
8351 *
8352 * @returns Memory mapping index, 1024 on failure.
8353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8354 */
8355IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8356{
8357 /*
8358 * The easy case.
8359 */
8360 if (pVCpu->iem.s.cActiveMappings == 0)
8361 {
8362 pVCpu->iem.s.iNextMapping = 1;
8363 return 0;
8364 }
8365
8366 /* There should be enough mappings for all instructions. */
8367 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8368
8369 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8370 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8371 return i;
8372
8373 AssertFailedReturn(1024);
8374}
8375
8376
8377/**
8378 * Commits a bounce buffer that needs writing back and unmaps it.
8379 *
8380 * @returns Strict VBox status code.
8381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8382 * @param iMemMap The index of the buffer to commit.
8383 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8384 * Always false in ring-3, obviously.
8385 */
8386IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8387{
8388 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8389 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8390#ifdef IN_RING3
8391 Assert(!fPostponeFail);
8392 RT_NOREF_PV(fPostponeFail);
8393#endif
8394
8395 /*
8396 * Do the writing.
8397 */
8398 PVM pVM = pVCpu->CTX_SUFF(pVM);
8399 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8400 {
8401 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8402 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8403 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8404 if (!pVCpu->iem.s.fBypassHandlers)
8405 {
8406 /*
8407 * Carefully and efficiently dealing with access handler return
8408 * codes makes this a little bloated.
8409 */
8410 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8412 pbBuf,
8413 cbFirst,
8414 PGMACCESSORIGIN_IEM);
8415 if (rcStrict == VINF_SUCCESS)
8416 {
8417 if (cbSecond)
8418 {
8419 rcStrict = PGMPhysWrite(pVM,
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8421 pbBuf + cbFirst,
8422 cbSecond,
8423 PGMACCESSORIGIN_IEM);
8424 if (rcStrict == VINF_SUCCESS)
8425 { /* nothing */ }
8426 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8427 {
8428 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8431 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8432 }
8433#ifndef IN_RING3
8434 else if (fPostponeFail)
8435 {
8436 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8440 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8441 return iemSetPassUpStatus(pVCpu, rcStrict);
8442 }
8443#endif
8444 else
8445 {
8446 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8447 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8449 return rcStrict;
8450 }
8451 }
8452 }
8453 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8454 {
8455 if (!cbSecond)
8456 {
8457 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8459 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8460 }
8461 else
8462 {
8463 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8465 pbBuf + cbFirst,
8466 cbSecond,
8467 PGMACCESSORIGIN_IEM);
8468 if (rcStrict2 == VINF_SUCCESS)
8469 {
8470 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8473 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8474 }
8475 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8480 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8481 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8482 }
8483#ifndef IN_RING3
8484 else if (fPostponeFail)
8485 {
8486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8489 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8490 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8491 return iemSetPassUpStatus(pVCpu, rcStrict);
8492 }
8493#endif
8494 else
8495 {
8496 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8497 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8498 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8499 return rcStrict2;
8500 }
8501 }
8502 }
8503#ifndef IN_RING3
8504 else if (fPostponeFail)
8505 {
8506 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8507 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8508 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8509 if (!cbSecond)
8510 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8511 else
8512 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8513 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8514 return iemSetPassUpStatus(pVCpu, rcStrict);
8515 }
8516#endif
8517 else
8518 {
8519 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8520 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8522 return rcStrict;
8523 }
8524 }
8525 else
8526 {
8527 /*
8528 * No access handlers, much simpler.
8529 */
8530 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8531 if (RT_SUCCESS(rc))
8532 {
8533 if (cbSecond)
8534 {
8535 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8536 if (RT_SUCCESS(rc))
8537 { /* likely */ }
8538 else
8539 {
8540 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8541 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8542 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8543 return rc;
8544 }
8545 }
8546 }
8547 else
8548 {
8549 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8551 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8552 return rc;
8553 }
8554 }
8555 }
8556
8557#if defined(IEM_LOG_MEMORY_WRITES)
8558 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8559 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8560 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8561 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8562 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8563 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8564
8565 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8566 g_cbIemWrote = cbWrote;
8567 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8568#endif
8569
8570 /*
8571 * Free the mapping entry.
8572 */
8573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8574 Assert(pVCpu->iem.s.cActiveMappings != 0);
8575 pVCpu->iem.s.cActiveMappings--;
8576 return VINF_SUCCESS;
8577}
8578
8579
8580/**
8581 * iemMemMap worker that deals with a request crossing pages.
8582 */
8583IEM_STATIC VBOXSTRICTRC
8584iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8585{
8586 /*
8587 * Do the address translations.
8588 */
8589 RTGCPHYS GCPhysFirst;
8590 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8591 if (rcStrict != VINF_SUCCESS)
8592 return rcStrict;
8593
8594 RTGCPHYS GCPhysSecond;
8595 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8596 fAccess, &GCPhysSecond);
8597 if (rcStrict != VINF_SUCCESS)
8598 return rcStrict;
8599 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8600
8601 PVM pVM = pVCpu->CTX_SUFF(pVM);
8602
8603 /*
8604 * Read in the current memory content if it's a read, execute or partial
8605 * write access.
8606 */
8607 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8608 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8609 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8610
8611 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8612 {
8613 if (!pVCpu->iem.s.fBypassHandlers)
8614 {
8615 /*
8616 * Must carefully deal with access handler status codes here,
8617 * which makes the code a bit bloated.
8618 */
8619 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8620 if (rcStrict == VINF_SUCCESS)
8621 {
8622 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8623 if (rcStrict == VINF_SUCCESS)
8624 { /*likely */ }
8625 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8626 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8627 else
8628 {
8629 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8630 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8631 return rcStrict;
8632 }
8633 }
8634 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8635 {
8636 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8637 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8638 {
8639 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8640 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8641 }
8642 else
8643 {
8644 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8645 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8646 return rcStrict2;
8647 }
8648 }
8649 else
8650 {
8651 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8652 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8653 return rcStrict;
8654 }
8655 }
8656 else
8657 {
8658 /*
8659 * No informational status codes here, much more straightforward.
8660 */
8661 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8662 if (RT_SUCCESS(rc))
8663 {
8664 Assert(rc == VINF_SUCCESS);
8665 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8666 if (RT_SUCCESS(rc))
8667 Assert(rc == VINF_SUCCESS);
8668 else
8669 {
8670 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8671 return rc;
8672 }
8673 }
8674 else
8675 {
8676 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8677 return rc;
8678 }
8679 }
8680 }
8681#ifdef VBOX_STRICT
8682 else
8683 memset(pbBuf, 0xcc, cbMem);
8684 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8685 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8686#endif
8687
8688 /*
8689 * Commit the bounce buffer entry.
8690 */
8691 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8692 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8693 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8696 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8697 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8698 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8699 pVCpu->iem.s.cActiveMappings++;
8700
8701 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8702 *ppvMem = pbBuf;
8703 return VINF_SUCCESS;
8704}
8705
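/*
 * Worked example for the split above (illustrative): an 8 byte access at a
 * guest address whose page offset is 0xffc gives
 *      cbFirstPage  = PAGE_SIZE - 0xffc = 4
 *      cbSecondPage = 8 - 4 = 4
 * so the first four bytes live on the first page and the rest on the next,
 * which is why both physical addresses are translated separately above.
 */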
8706
8707/**
8708 * iemMemMap worker that deals with iemMemPageMap failures.
8709 */
8710IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8711 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8712{
8713 /*
8714 * Filter out conditions we can handle and the ones which shouldn't happen.
8715 */
8716 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8717 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8718 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8719 {
8720 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8721 return rcMap;
8722 }
8723 pVCpu->iem.s.cPotentialExits++;
8724
8725 /*
8726 * Read in the current memory content if it's a read, execute or partial
8727 * write access.
8728 */
8729 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8730 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8731 {
8732 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8733 memset(pbBuf, 0xff, cbMem);
8734 else
8735 {
8736 int rc;
8737 if (!pVCpu->iem.s.fBypassHandlers)
8738 {
8739 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8740 if (rcStrict == VINF_SUCCESS)
8741 { /* nothing */ }
8742 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8743 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8744 else
8745 {
8746 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8747 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8748 return rcStrict;
8749 }
8750 }
8751 else
8752 {
8753 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8754 if (RT_SUCCESS(rc))
8755 { /* likely */ }
8756 else
8757 {
8758 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8759 GCPhysFirst, rc));
8760 return rc;
8761 }
8762 }
8763 }
8764 }
8765#ifdef VBOX_STRICT
8766 else
8767 memset(pbBuf, 0xcc, cbMem);
8770 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8771 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8772#endif
8773
8774 /*
8775 * Commit the bounce buffer entry.
8776 */
8777 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8779 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8780 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8781 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8782 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8783 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8784 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8785 pVCpu->iem.s.cActiveMappings++;
8786
8787 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8788 *ppvMem = pbBuf;
8789 return VINF_SUCCESS;
8790}
8791
8792
8793
8794/**
8795 * Maps the specified guest memory for the given kind of access.
8796 *
8797 * This may be using bounce buffering of the memory if it's crossing a page
8798 * boundary or if there is an access handler installed for any of it. Because
8799 * of lock prefix guarantees, we're in for some extra clutter when this
8800 * happens.
8801 *
8802 * This may raise a \#GP, \#SS, \#PF or \#AC.
8803 *
8804 * @returns VBox strict status code.
8805 *
8806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8807 * @param ppvMem Where to return the pointer to the mapped
8808 * memory.
8809 * @param cbMem The number of bytes to map. This is usually 1,
8810 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8811 * string operations it can be up to a page.
8812 * @param iSegReg The index of the segment register to use for
8813 * this access. The base and limits are checked.
8814 * Use UINT8_MAX to indicate that no segmentation
8815 * is required (for IDT, GDT and LDT accesses).
8816 * @param GCPtrMem The address of the guest memory.
8817 * @param fAccess How the memory is being accessed. The
8818 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8819 * how to map the memory, while the
8820 * IEM_ACCESS_WHAT_XXX bit is used when raising
8821 * exceptions.
8822 */
8823IEM_STATIC VBOXSTRICTRC
8824iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8825{
8826 /*
8827 * Check the input and figure out which mapping entry to use.
8828 */
8829 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8830 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8831 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8832
8833 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8834 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8835 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8836 {
8837 iMemMap = iemMemMapFindFree(pVCpu);
8838 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8839 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8840 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8841 pVCpu->iem.s.aMemMappings[2].fAccess),
8842 VERR_IEM_IPE_9);
8843 }
8844
8845 /*
8846 * Map the memory, checking that we can actually access it. If something
8847 * slightly complicated happens, fall back on bounce buffering.
8848 */
8849 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8850 if (rcStrict != VINF_SUCCESS)
8851 return rcStrict;
8852
8853 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8854 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8855
8856 RTGCPHYS GCPhysFirst;
8857 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8858 if (rcStrict != VINF_SUCCESS)
8859 return rcStrict;
8860
8861 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8862 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8863 if (fAccess & IEM_ACCESS_TYPE_READ)
8864 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8865
8866 void *pvMem;
8867 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8868 if (rcStrict != VINF_SUCCESS)
8869 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8870
8871 /*
8872 * Fill in the mapping table entry.
8873 */
8874 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8875 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8876 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8877 pVCpu->iem.s.cActiveMappings++;
8878
8879 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8880 *ppvMem = pvMem;
8881 return VINF_SUCCESS;
8882}
8883
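/*
 * Illustrative sketch (hypothetical, mirroring the fetch helpers further
 * down): a 32-bit store would typically go through map/commit like this,
 * with u32Value and GCPtrMem being made-up locals:
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 */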
8884
8885/**
8886 * Commits the guest memory if bounce buffered and unmaps it.
8887 *
8888 * @returns Strict VBox status code.
8889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8890 * @param pvMem The mapping.
8891 * @param fAccess The kind of access.
8892 */
8893IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8894{
8895 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8896 AssertReturn(iMemMap >= 0, iMemMap);
8897
8898 /* If it's bounce buffered, we may need to write back the buffer. */
8899 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8900 {
8901 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8902 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8903 }
8904 /* Otherwise unlock it. */
8905 else
8906 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8907
8908 /* Free the entry. */
8909 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8910 Assert(pVCpu->iem.s.cActiveMappings != 0);
8911 pVCpu->iem.s.cActiveMappings--;
8912 return VINF_SUCCESS;
8913}
8914
8915#ifdef IEM_WITH_SETJMP
8916
8917/**
8918 * Maps the specified guest memory for the given kind of access, longjmp on
8919 * error.
8920 *
8921 * This may be using bounce buffering of the memory if it's crossing a page
8922 * boundary or if there is an access handler installed for any of it. Because
8923 * of lock prefix guarantees, we're in for some extra clutter when this
8924 * happens.
8925 *
8926 * This may raise a \#GP, \#SS, \#PF or \#AC.
8927 *
8928 * @returns Pointer to the mapped memory.
8929 *
8930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8931 * @param cbMem The number of bytes to map. This is usually 1,
8932 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8933 * string operations it can be up to a page.
8934 * @param iSegReg The index of the segment register to use for
8935 * this access. The base and limits are checked.
8936 * Use UINT8_MAX to indicate that no segmentation
8937 * is required (for IDT, GDT and LDT accesses).
8938 * @param GCPtrMem The address of the guest memory.
8939 * @param fAccess How the memory is being accessed. The
8940 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8941 * how to map the memory, while the
8942 * IEM_ACCESS_WHAT_XXX bit is used when raising
8943 * exceptions.
8944 */
8945IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8946{
8947 /*
8948 * Check the input and figure out which mapping entry to use.
8949 */
8950 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8951 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8952 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8953
8954 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8955 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8956 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8957 {
8958 iMemMap = iemMemMapFindFree(pVCpu);
8959 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8960 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8961 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8962 pVCpu->iem.s.aMemMappings[2].fAccess),
8963 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8964 }
8965
8966 /*
8967 * Map the memory, checking that we can actually access it. If something
8968 * slightly complicated happens, fall back on bounce buffering.
8969 */
8970 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8971 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8972 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8973
8974 /* Crossing a page boundary? */
8975 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8976 { /* No (likely). */ }
8977 else
8978 {
8979 void *pvMem;
8980 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8981 if (rcStrict == VINF_SUCCESS)
8982 return pvMem;
8983 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8984 }
8985
8986 RTGCPHYS GCPhysFirst;
8987 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8988 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8989 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8990
8991 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8992 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8993 if (fAccess & IEM_ACCESS_TYPE_READ)
8994 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8995
8996 void *pvMem;
8997 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8998 if (rcStrict == VINF_SUCCESS)
8999 { /* likely */ }
9000 else
9001 {
9002 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9003 if (rcStrict == VINF_SUCCESS)
9004 return pvMem;
9005 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9006 }
9007
9008 /*
9009 * Fill in the mapping table entry.
9010 */
9011 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9012 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9013 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9014 pVCpu->iem.s.cActiveMappings++;
9015
9016 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9017 return pvMem;
9018}
9019
9020
9021/**
9022 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9023 *
9024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9025 * @param pvMem The mapping.
9026 * @param fAccess The kind of access.
9027 */
9028IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9029{
9030 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9031 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9032
9033 /* If it's bounce buffered, we may need to write back the buffer. */
9034 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9035 {
9036 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9037 {
9038 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9039 if (rcStrict == VINF_SUCCESS)
9040 return;
9041 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9042 }
9043 }
9044 /* Otherwise unlock it. */
9045 else
9046 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9047
9048 /* Free the entry. */
9049 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9050 Assert(pVCpu->iem.s.cActiveMappings != 0);
9051 pVCpu->iem.s.cActiveMappings--;
9052}
9053
9054#endif /* IEM_WITH_SETJMP */
9055
9056#ifndef IN_RING3
9057/**
9058 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
9059 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9060 *
9061 * Allows the instruction to be completed and retired, while the IEM user will
9062 * return to ring-3 immediately afterwards and do the postponed writes there.
9063 *
9064 * @returns VBox status code (no strict statuses). Caller must check
9065 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9067 * @param pvMem The mapping.
9068 * @param fAccess The kind of access.
9069 */
9070IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9071{
9072 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9073 AssertReturn(iMemMap >= 0, iMemMap);
9074
9075 /* If it's bounce buffered, we may need to write back the buffer. */
9076 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9077 {
9078 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9079 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9080 }
9081 /* Otherwise unlock it. */
9082 else
9083 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9084
9085 /* Free the entry. */
9086 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9087 Assert(pVCpu->iem.s.cActiveMappings != 0);
9088 pVCpu->iem.s.cActiveMappings--;
9089 return VINF_SUCCESS;
9090}
9091#endif
9092
9093
9094/**
9095 * Rolls back mappings, releasing page locks and such.
9096 *
9097 * The caller shall only call this after checking cActiveMappings.
9098 *
9100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9101 */
9102IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9103{
9104 Assert(pVCpu->iem.s.cActiveMappings > 0);
9105
9106 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9107 while (iMemMap-- > 0)
9108 {
9109 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9110 if (fAccess != IEM_ACCESS_INVALID)
9111 {
9112 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9113 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9114 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9115 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9116 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9117 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9118 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9119 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9120 pVCpu->iem.s.cActiveMappings--;
9121 }
9122 }
9123}
9124
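/*
 * Illustrative sketch (hypothetical): a caller that bailed out of an
 * instruction with mappings still active would clean up like this before
 * passing the status up:
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */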
9125
9126/**
9127 * Fetches a data byte.
9128 *
9129 * @returns Strict VBox status code.
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 * @param pu8Dst Where to return the byte.
9132 * @param iSegReg The index of the segment register to use for
9133 * this access. The base and limits are checked.
9134 * @param GCPtrMem The address of the guest memory.
9135 */
9136IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9137{
9138 /* The lazy approach for now... */
9139 uint8_t const *pu8Src;
9140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9141 if (rc == VINF_SUCCESS)
9142 {
9143 *pu8Dst = *pu8Src;
9144 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9145 }
9146 return rc;
9147}
9148
9149
9150#ifdef IEM_WITH_SETJMP
9151/**
9152 * Fetches a data byte, longjmp on error.
9153 *
9154 * @returns The byte.
9155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 */
9160DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9161{
9162 /* The lazy approach for now... */
9163 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9164 uint8_t const bRet = *pu8Src;
9165 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9166 return bRet;
9167}
9168#endif /* IEM_WITH_SETJMP */
9169
9170
9171/**
9172 * Fetches a data word.
9173 *
9174 * @returns Strict VBox status code.
9175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9176 * @param pu16Dst Where to return the word.
9177 * @param iSegReg The index of the segment register to use for
9178 * this access. The base and limits are checked.
9179 * @param GCPtrMem The address of the guest memory.
9180 */
9181IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9182{
9183 /* The lazy approach for now... */
9184 uint16_t const *pu16Src;
9185 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 if (rc == VINF_SUCCESS)
9187 {
9188 *pu16Dst = *pu16Src;
9189 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9190 }
9191 return rc;
9192}
9193
9194
9195#ifdef IEM_WITH_SETJMP
9196/**
9197 * Fetches a data word, longjmp on error.
9198 *
9199 * @returns The word.
9200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9201 * @param iSegReg The index of the segment register to use for
9202 * this access. The base and limits are checked.
9203 * @param GCPtrMem The address of the guest memory.
9204 */
9205DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9206{
9207 /* The lazy approach for now... */
9208 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9209 uint16_t const u16Ret = *pu16Src;
9210 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9211 return u16Ret;
9212}
9213#endif
9214
9215
9216/**
9217 * Fetches a data dword.
9218 *
9219 * @returns Strict VBox status code.
9220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9221 * @param pu32Dst Where to return the dword.
9222 * @param iSegReg The index of the segment register to use for
9223 * this access. The base and limits are checked.
9224 * @param GCPtrMem The address of the guest memory.
9225 */
9226IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9227{
9228 /* The lazy approach for now... */
9229 uint32_t const *pu32Src;
9230 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9231 if (rc == VINF_SUCCESS)
9232 {
9233 *pu32Dst = *pu32Src;
9234 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9235 }
9236 return rc;
9237}
9238
9239
9240#ifdef IEM_WITH_SETJMP
9241
9242IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9243{
9244 Assert(cbMem >= 1);
9245 Assert(iSegReg < X86_SREG_COUNT);
9246
9247 /*
9248 * 64-bit mode is simpler.
9249 */
9250 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9251 {
9252 if (iSegReg >= X86_SREG_FS)
9253 {
9254 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9255 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9256 GCPtrMem += pSel->u64Base;
9257 }
9258
9259 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9260 return GCPtrMem;
9261 }
9262 /*
9263 * 16-bit and 32-bit segmentation.
9264 */
9265 else
9266 {
9267 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9268 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9269 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9270 == X86DESCATTR_P /* data, expand up */
9271 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9272 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9273 {
9274 /* expand up */
9275 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9276 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9277 && GCPtrLast32 > (uint32_t)GCPtrMem))
9278 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9279 }
9280 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9281 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9282 {
9283 /* expand down */
9284 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9285 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9286 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9287 && GCPtrLast32 > (uint32_t)GCPtrMem))
9288 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9289 }
9290 else
9291 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9292 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9293 }
9294 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9295}
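
/* Worked example with illustrative values (not part of the original code): for
   a 32-bit expand-down data segment with u32Limit = 0x00000fff and D/B = 1,
   the valid offset range is 0x1000..0xffffffff.  A 4-byte read at
   GCPtrMem = 0x2000 passes all three checks in the expand-down branch above
   (0x2000 > 0xfff, 0x2004 <= 0xffffffff, no wrap-around) and the function
   returns 0x2000 plus the segment base, while the same read at
   GCPtrMem = 0x0ffc fails the first check and lands in
   iemRaiseSelectorBoundsJmp. */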
9296
9297
9298IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9299{
9300 Assert(cbMem >= 1);
9301 Assert(iSegReg < X86_SREG_COUNT);
9302
9303 /*
9304 * 64-bit mode is simpler.
9305 */
9306 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9307 {
9308 if (iSegReg >= X86_SREG_FS)
9309 {
9310 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9311 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9312 GCPtrMem += pSel->u64Base;
9313 }
9314
9315 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9316 return GCPtrMem;
9317 }
9318 /*
9319 * 16-bit and 32-bit segmentation.
9320 */
9321 else
9322 {
9323 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9324 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9325 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9326 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9327 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9328 {
9329 /* expand up */
9330 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9331 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9332 && GCPtrLast32 > (uint32_t)GCPtrMem))
9333 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9334 }
9335        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9336 {
9337 /* expand down */
9338 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9339 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9340 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9341 && GCPtrLast32 > (uint32_t)GCPtrMem))
9342 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9343 }
9344 else
9345 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9346 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9347 }
9348 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9349}
9350
9351
9352/**
9353 * Fetches a data dword, longjmp on error, fallback/safe version.
9354 *
9355 * @returns The dword
9356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9357 * @param iSegReg The index of the segment register to use for
9358 * this access. The base and limits are checked.
9359 * @param GCPtrMem The address of the guest memory.
9360 */
9361IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9362{
9363 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9364 uint32_t const u32Ret = *pu32Src;
9365 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9366 return u32Ret;
9367}
9368
9369
9370/**
9371 * Fetches a data dword, longjmp on error.
9372 *
9373 * @returns The dword
9374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9375 * @param iSegReg The index of the segment register to use for
9376 * this access. The base and limits are checked.
9377 * @param GCPtrMem The address of the guest memory.
9378 */
9379DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9380{
9381# ifdef IEM_WITH_DATA_TLB
9382 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9383 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9384 {
9385 /// @todo more later.
9386 }
9387
9388 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9389# else
9390 /* The lazy approach. */
9391 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9392 uint32_t const u32Ret = *pu32Src;
9393 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9394 return u32Ret;
9395# endif
9396}
9397#endif
9398
9399
9400#ifdef SOME_UNUSED_FUNCTION
9401/**
9402 * Fetches a data dword and sign extends it to a qword.
9403 *
9404 * @returns Strict VBox status code.
9405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9406 * @param pu64Dst Where to return the sign extended value.
9407 * @param iSegReg The index of the segment register to use for
9408 * this access. The base and limits are checked.
9409 * @param GCPtrMem The address of the guest memory.
9410 */
9411IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9412{
9413 /* The lazy approach for now... */
9414 int32_t const *pi32Src;
9415 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9416 if (rc == VINF_SUCCESS)
9417 {
9418 *pu64Dst = *pi32Src;
9419 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9420 }
9421#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9422 else
9423 *pu64Dst = 0;
9424#endif
9425 return rc;
9426}
9427#endif
9428
9429
9430/**
9431 * Fetches a data qword.
9432 *
9433 * @returns Strict VBox status code.
9434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9435 * @param pu64Dst Where to return the qword.
9436 * @param iSegReg The index of the segment register to use for
9437 * this access. The base and limits are checked.
9438 * @param GCPtrMem The address of the guest memory.
9439 */
9440IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9441{
9442 /* The lazy approach for now... */
9443 uint64_t const *pu64Src;
9444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9445 if (rc == VINF_SUCCESS)
9446 {
9447 *pu64Dst = *pu64Src;
9448 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9449 }
9450 return rc;
9451}
9452
9453
9454#ifdef IEM_WITH_SETJMP
9455/**
9456 * Fetches a data qword, longjmp on error.
9457 *
9458 * @returns The qword.
9459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9460 * @param iSegReg The index of the segment register to use for
9461 * this access. The base and limits are checked.
9462 * @param GCPtrMem The address of the guest memory.
9463 */
9464DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9465{
9466 /* The lazy approach for now... */
9467 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9468 uint64_t const u64Ret = *pu64Src;
9469 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9470 return u64Ret;
9471}
9472#endif
9473
9474
9475/**
9476 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9477 *
9478 * @returns Strict VBox status code.
9479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9480 * @param pu64Dst Where to return the qword.
9481 * @param iSegReg The index of the segment register to use for
9482 * this access. The base and limits are checked.
9483 * @param GCPtrMem The address of the guest memory.
9484 */
9485IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9486{
9487 /* The lazy approach for now... */
9488 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9489 if (RT_UNLIKELY(GCPtrMem & 15))
9490 return iemRaiseGeneralProtectionFault0(pVCpu);
9491
9492 uint64_t const *pu64Src;
9493 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9494 if (rc == VINF_SUCCESS)
9495 {
9496 *pu64Dst = *pu64Src;
9497 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9498 }
9499 return rc;
9500}
9501
9502
9503#ifdef IEM_WITH_SETJMP
9504/**
9505 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9506 *
9507 * @returns The qword.
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509 * @param iSegReg The index of the segment register to use for
9510 * this access. The base and limits are checked.
9511 * @param GCPtrMem The address of the guest memory.
9512 */
9513DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9514{
9515 /* The lazy approach for now... */
9516 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9517 if (RT_LIKELY(!(GCPtrMem & 15)))
9518 {
9519 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9520 uint64_t const u64Ret = *pu64Src;
9521 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9522 return u64Ret;
9523 }
9524
9525 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9526 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9527}
9528#endif
9529
9530
9531/**
9532 * Fetches a data tword.
9533 *
9534 * @returns Strict VBox status code.
9535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9536 * @param pr80Dst Where to return the tword.
9537 * @param iSegReg The index of the segment register to use for
9538 * this access. The base and limits are checked.
9539 * @param GCPtrMem The address of the guest memory.
9540 */
9541IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9542{
9543 /* The lazy approach for now... */
9544 PCRTFLOAT80U pr80Src;
9545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9546 if (rc == VINF_SUCCESS)
9547 {
9548 *pr80Dst = *pr80Src;
9549 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9550 }
9551 return rc;
9552}
9553
9554
9555#ifdef IEM_WITH_SETJMP
9556/**
9557 * Fetches a data tword, longjmp on error.
9558 *
9559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9560 * @param pr80Dst Where to return the tword.
9561 * @param iSegReg The index of the segment register to use for
9562 * this access. The base and limits are checked.
9563 * @param GCPtrMem The address of the guest memory.
9564 */
9565DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9566{
9567 /* The lazy approach for now... */
9568 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9569 *pr80Dst = *pr80Src;
9570 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9571}
9572#endif
9573
9574
9575/**
9576 * Fetches a data dqword (double qword), generally SSE related.
9577 *
9578 * @returns Strict VBox status code.
9579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9580 * @param pu128Dst Where to return the dqword.
9581 * @param iSegReg The index of the segment register to use for
9582 * this access. The base and limits are checked.
9583 * @param GCPtrMem The address of the guest memory.
9584 */
9585IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9586{
9587 /* The lazy approach for now... */
9588 PCRTUINT128U pu128Src;
9589 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9590 if (rc == VINF_SUCCESS)
9591 {
9592 pu128Dst->au64[0] = pu128Src->au64[0];
9593 pu128Dst->au64[1] = pu128Src->au64[1];
9594 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9595 }
9596 return rc;
9597}
9598
9599
9600#ifdef IEM_WITH_SETJMP
9601/**
9602 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9603 *
9604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9605 * @param pu128Dst Where to return the dqword.
9606 * @param iSegReg The index of the segment register to use for
9607 * this access. The base and limits are checked.
9608 * @param GCPtrMem The address of the guest memory.
9609 */
9610IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9611{
9612 /* The lazy approach for now... */
9613 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9614 pu128Dst->au64[0] = pu128Src->au64[0];
9615 pu128Dst->au64[1] = pu128Src->au64[1];
9616 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9617}
9618#endif
9619
9620
9621/**
9622 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9623 * related.
9624 *
9625 * Raises \#GP(0) if not aligned.
9626 *
9627 * @returns Strict VBox status code.
9628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9629 * @param pu128Dst Where to return the dqword.
9630 * @param iSegReg The index of the segment register to use for
9631 * this access. The base and limits are checked.
9632 * @param GCPtrMem The address of the guest memory.
9633 */
9634IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9635{
9636 /* The lazy approach for now... */
9637 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9638 if ( (GCPtrMem & 15)
9639 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9640 return iemRaiseGeneralProtectionFault0(pVCpu);
9641
9642 PCRTUINT128U pu128Src;
9643 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9644 if (rc == VINF_SUCCESS)
9645 {
9646 pu128Dst->au64[0] = pu128Src->au64[0];
9647 pu128Dst->au64[1] = pu128Src->au64[1];
9648 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9649 }
9650 return rc;
9651}
9652
9653
9654#ifdef IEM_WITH_SETJMP
9655/**
9656 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9657 * related, longjmp on error.
9658 *
9659 * Raises \#GP(0) if not aligned.
9660 *
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param pu128Dst Where to return the dqword.
9663 * @param iSegReg The index of the segment register to use for
9664 * this access. The base and limits are checked.
9665 * @param GCPtrMem The address of the guest memory.
9666 */
9667DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9668{
9669 /* The lazy approach for now... */
9670 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9671 if ( (GCPtrMem & 15) == 0
9672 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9673 {
9674 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9675 pu128Dst->au64[0] = pu128Src->au64[0];
9676 pu128Dst->au64[1] = pu128Src->au64[1];
9677 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9678 return;
9679 }
9680
9681 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9682 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9683}
9684#endif
9685
9686
9687/**
9688 * Fetches a data oword (octo word), generally AVX related.
9689 *
9690 * @returns Strict VBox status code.
9691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9692 * @param pu256Dst Where to return the oword.
9693 * @param iSegReg The index of the segment register to use for
9694 * this access. The base and limits are checked.
9695 * @param GCPtrMem The address of the guest memory.
9696 */
9697IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9698{
9699 /* The lazy approach for now... */
9700 PCRTUINT256U pu256Src;
9701 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9702 if (rc == VINF_SUCCESS)
9703 {
9704 pu256Dst->au64[0] = pu256Src->au64[0];
9705 pu256Dst->au64[1] = pu256Src->au64[1];
9706 pu256Dst->au64[2] = pu256Src->au64[2];
9707 pu256Dst->au64[3] = pu256Src->au64[3];
9708 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9709 }
9710 return rc;
9711}
9712
9713
9714#ifdef IEM_WITH_SETJMP
9715/**
9716 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9717 *
9718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9719 * @param pu256Dst Where to return the oword.
9720 * @param iSegReg The index of the segment register to use for
9721 * this access. The base and limits are checked.
9722 * @param GCPtrMem The address of the guest memory.
9723 */
9724IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9725{
9726 /* The lazy approach for now... */
9727 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9728 pu256Dst->au64[0] = pu256Src->au64[0];
9729 pu256Dst->au64[1] = pu256Src->au64[1];
9730 pu256Dst->au64[2] = pu256Src->au64[2];
9731 pu256Dst->au64[3] = pu256Src->au64[3];
9732 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9733}
9734#endif
9735
9736
9737/**
9738 * Fetches a data oword (octo word) at an aligned address, generally AVX
9739 * related.
9740 *
9741 * Raises \#GP(0) if not aligned.
9742 *
9743 * @returns Strict VBox status code.
9744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9745 * @param pu256Dst Where to return the oword.
9746 * @param iSegReg The index of the segment register to use for
9747 * this access. The base and limits are checked.
9748 * @param GCPtrMem The address of the guest memory.
9749 */
9750IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9751{
9752 /* The lazy approach for now... */
9753 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9754 if (GCPtrMem & 31)
9755 return iemRaiseGeneralProtectionFault0(pVCpu);
9756
9757 PCRTUINT256U pu256Src;
9758 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9759 if (rc == VINF_SUCCESS)
9760 {
9761 pu256Dst->au64[0] = pu256Src->au64[0];
9762 pu256Dst->au64[1] = pu256Src->au64[1];
9763 pu256Dst->au64[2] = pu256Src->au64[2];
9764 pu256Dst->au64[3] = pu256Src->au64[3];
9765 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9766 }
9767 return rc;
9768}
9769
9770
9771#ifdef IEM_WITH_SETJMP
9772/**
9773 * Fetches a data oword (octo word) at an aligned address, generally AVX
9774 * related, longjmp on error.
9775 *
9776 * Raises \#GP(0) if not aligned.
9777 *
9778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9779 * @param pu256Dst Where to return the oword.
9780 * @param iSegReg The index of the segment register to use for
9781 * this access. The base and limits are checked.
9782 * @param GCPtrMem The address of the guest memory.
9783 */
9784DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9785{
9786 /* The lazy approach for now... */
9787 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9788 if ((GCPtrMem & 31) == 0)
9789 {
9790 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9791 pu256Dst->au64[0] = pu256Src->au64[0];
9792 pu256Dst->au64[1] = pu256Src->au64[1];
9793 pu256Dst->au64[2] = pu256Src->au64[2];
9794 pu256Dst->au64[3] = pu256Src->au64[3];
9795 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9796 return;
9797 }
9798
9799 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9800 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9801}
9802#endif
9803
9804
9805
9806/**
9807 * Fetches a descriptor register (lgdt, lidt).
9808 *
9809 * @returns Strict VBox status code.
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param pcbLimit Where to return the limit.
9812 * @param pGCPtrBase Where to return the base.
9813 * @param iSegReg The index of the segment register to use for
9814 * this access. The base and limits are checked.
9815 * @param GCPtrMem The address of the guest memory.
9816 * @param enmOpSize The effective operand size.
9817 */
9818IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9819 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9820{
9821 /*
9822 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9823 * little special:
9824 * - The two reads are done separately.
9825 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9826 * - We suspect the 386 to actually commit the limit before the base in
9827 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9828 * don't try to emulate this eccentric behavior, because it's not well
9829 * enough understood and rather hard to trigger.
9830 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9831 */
9832 VBOXSTRICTRC rcStrict;
9833 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9834 {
9835 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9836 if (rcStrict == VINF_SUCCESS)
9837 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9838 }
9839 else
9840 {
9841        uint32_t uTmp = 0; /* (otherwise Visual C++ may warn about it being used uninitialized) */
9842 if (enmOpSize == IEMMODE_32BIT)
9843 {
9844 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9845 {
9846 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9847 if (rcStrict == VINF_SUCCESS)
9848 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9849 }
9850 else
9851 {
9852 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9853 if (rcStrict == VINF_SUCCESS)
9854 {
9855 *pcbLimit = (uint16_t)uTmp;
9856 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9857 }
9858 }
9859 if (rcStrict == VINF_SUCCESS)
9860 *pGCPtrBase = uTmp;
9861 }
9862 else
9863 {
9864 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9865 if (rcStrict == VINF_SUCCESS)
9866 {
9867 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9868 if (rcStrict == VINF_SUCCESS)
9869 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9870 }
9871 }
9872 }
9873 return rcStrict;
9874}
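
/* Worked example with illustrative values (not part of the original code):
   assume the six bytes at GCPtrMem are 7f 03 00 00 40 00.  On a non-486
   target with a 32-bit operand size the code above does a word read
   (*pcbLimit = 0x037f) followed by a dword read at GCPtrMem + 2
   (*pGCPtrBase = 0x00400000).  With a 16-bit operand size the dword read at
   GCPtrMem + 2 is masked with 0x00ffffff, so a non-zero top byte would have
   been discarded.  In 64-bit mode the base is read as a full qword at
   GCPtrMem + 2 instead. */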
9875
9876
9877
9878/**
9879 * Stores a data byte.
9880 *
9881 * @returns Strict VBox status code.
9882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9883 * @param iSegReg The index of the segment register to use for
9884 * this access. The base and limits are checked.
9885 * @param GCPtrMem The address of the guest memory.
9886 * @param u8Value The value to store.
9887 */
9888IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9889{
9890 /* The lazy approach for now... */
9891 uint8_t *pu8Dst;
9892 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9893 if (rc == VINF_SUCCESS)
9894 {
9895 *pu8Dst = u8Value;
9896 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9897 }
9898 return rc;
9899}
9900
9901
9902#ifdef IEM_WITH_SETJMP
9903/**
9904 * Stores a data byte, longjmp on error.
9905 *
9906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9907 * @param iSegReg The index of the segment register to use for
9908 * this access. The base and limits are checked.
9909 * @param GCPtrMem The address of the guest memory.
9910 * @param u8Value The value to store.
9911 */
9912IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9913{
9914 /* The lazy approach for now... */
9915 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9916 *pu8Dst = u8Value;
9917 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9918}
9919#endif
9920
9921
9922/**
9923 * Stores a data word.
9924 *
9925 * @returns Strict VBox status code.
9926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9927 * @param iSegReg The index of the segment register to use for
9928 * this access. The base and limits are checked.
9929 * @param GCPtrMem The address of the guest memory.
9930 * @param u16Value The value to store.
9931 */
9932IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9933{
9934 /* The lazy approach for now... */
9935 uint16_t *pu16Dst;
9936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9937 if (rc == VINF_SUCCESS)
9938 {
9939 *pu16Dst = u16Value;
9940 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9941 }
9942 return rc;
9943}
9944
9945
9946#ifdef IEM_WITH_SETJMP
9947/**
9948 * Stores a data word, longjmp on error.
9949 *
9950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9951 * @param iSegReg The index of the segment register to use for
9952 * this access. The base and limits are checked.
9953 * @param GCPtrMem The address of the guest memory.
9954 * @param u16Value The value to store.
9955 */
9956IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9957{
9958 /* The lazy approach for now... */
9959 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9960 *pu16Dst = u16Value;
9961 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9962}
9963#endif
9964
9965
9966/**
9967 * Stores a data dword.
9968 *
9969 * @returns Strict VBox status code.
9970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9971 * @param iSegReg The index of the segment register to use for
9972 * this access. The base and limits are checked.
9973 * @param GCPtrMem The address of the guest memory.
9974 * @param u32Value The value to store.
9975 */
9976IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9977{
9978 /* The lazy approach for now... */
9979 uint32_t *pu32Dst;
9980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9981 if (rc == VINF_SUCCESS)
9982 {
9983 *pu32Dst = u32Value;
9984 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9985 }
9986 return rc;
9987}
9988
9989
9990#ifdef IEM_WITH_SETJMP
9991/**
9992 * Stores a data dword, longjmp on error.
9993 *
9995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9996 * @param iSegReg The index of the segment register to use for
9997 * this access. The base and limits are checked.
9998 * @param GCPtrMem The address of the guest memory.
9999 * @param u32Value The value to store.
10000 */
10001IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10002{
10003 /* The lazy approach for now... */
10004 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10005 *pu32Dst = u32Value;
10006 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10007}
10008#endif
10009
10010
10011/**
10012 * Stores a data qword.
10013 *
10014 * @returns Strict VBox status code.
10015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10016 * @param iSegReg The index of the segment register to use for
10017 * this access. The base and limits are checked.
10018 * @param GCPtrMem The address of the guest memory.
10019 * @param u64Value The value to store.
10020 */
10021IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10022{
10023 /* The lazy approach for now... */
10024 uint64_t *pu64Dst;
10025 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10026 if (rc == VINF_SUCCESS)
10027 {
10028 *pu64Dst = u64Value;
10029 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10030 }
10031 return rc;
10032}
10033
10034
10035#ifdef IEM_WITH_SETJMP
10036/**
10037 * Stores a data qword, longjmp on error.
10038 *
10039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10040 * @param iSegReg The index of the segment register to use for
10041 * this access. The base and limits are checked.
10042 * @param GCPtrMem The address of the guest memory.
10043 * @param u64Value The value to store.
10044 */
10045IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10046{
10047 /* The lazy approach for now... */
10048 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10049 *pu64Dst = u64Value;
10050 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10051}
10052#endif
10053
10054
10055/**
10056 * Stores a data dqword.
10057 *
10058 * @returns Strict VBox status code.
10059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10060 * @param iSegReg The index of the segment register to use for
10061 * this access. The base and limits are checked.
10062 * @param GCPtrMem The address of the guest memory.
10063 * @param u128Value The value to store.
10064 */
10065IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10066{
10067 /* The lazy approach for now... */
10068 PRTUINT128U pu128Dst;
10069 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10070 if (rc == VINF_SUCCESS)
10071 {
10072 pu128Dst->au64[0] = u128Value.au64[0];
10073 pu128Dst->au64[1] = u128Value.au64[1];
10074 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10075 }
10076 return rc;
10077}
10078
10079
10080#ifdef IEM_WITH_SETJMP
10081/**
10082 * Stores a data dqword, longjmp on error.
10083 *
10084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10085 * @param iSegReg The index of the segment register to use for
10086 * this access. The base and limits are checked.
10087 * @param GCPtrMem The address of the guest memory.
10088 * @param u128Value The value to store.
10089 */
10090IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10091{
10092 /* The lazy approach for now... */
10093 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10094 pu128Dst->au64[0] = u128Value.au64[0];
10095 pu128Dst->au64[1] = u128Value.au64[1];
10096 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10097}
10098#endif
10099
10100
10101/**
10102 * Stores a data dqword, SSE aligned.
10103 *
10104 * @returns Strict VBox status code.
10105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10106 * @param iSegReg The index of the segment register to use for
10107 * this access. The base and limits are checked.
10108 * @param GCPtrMem The address of the guest memory.
10109 * @param u128Value The value to store.
10110 */
10111IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10112{
10113 /* The lazy approach for now... */
10114 if ( (GCPtrMem & 15)
10115 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10116 return iemRaiseGeneralProtectionFault0(pVCpu);
10117
10118 PRTUINT128U pu128Dst;
10119 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10120 if (rc == VINF_SUCCESS)
10121 {
10122 pu128Dst->au64[0] = u128Value.au64[0];
10123 pu128Dst->au64[1] = u128Value.au64[1];
10124 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10125 }
10126 return rc;
10127}
10128
10129
10130#ifdef IEM_WITH_SETJMP
10131/**
10132 * Stores a data dqword, SSE aligned, longjmp on error.
10133 *
10135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10136 * @param iSegReg The index of the segment register to use for
10137 * this access. The base and limits are checked.
10138 * @param GCPtrMem The address of the guest memory.
10139 * @param u128Value The value to store.
10140 */
10141DECL_NO_INLINE(IEM_STATIC, void)
10142iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10143{
10144 /* The lazy approach for now... */
10145 if ( (GCPtrMem & 15) == 0
10146 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10147 {
10148 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10149 pu128Dst->au64[0] = u128Value.au64[0];
10150 pu128Dst->au64[1] = u128Value.au64[1];
10151 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10152 return;
10153 }
10154
10155 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10156 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10157}
10158#endif
10159
10160
10161/**
10162 * Stores a data oword (octo word).
10163 *
10164 * @returns Strict VBox status code.
10165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10166 * @param iSegReg The index of the segment register to use for
10167 * this access. The base and limits are checked.
10168 * @param GCPtrMem The address of the guest memory.
10169 * @param pu256Value Pointer to the value to store.
10170 */
10171IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10172{
10173 /* The lazy approach for now... */
10174 PRTUINT256U pu256Dst;
10175 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10176 if (rc == VINF_SUCCESS)
10177 {
10178 pu256Dst->au64[0] = pu256Value->au64[0];
10179 pu256Dst->au64[1] = pu256Value->au64[1];
10180 pu256Dst->au64[2] = pu256Value->au64[2];
10181 pu256Dst->au64[3] = pu256Value->au64[3];
10182 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10183 }
10184 return rc;
10185}
10186
10187
10188#ifdef IEM_WITH_SETJMP
10189/**
10190 * Stores a data oword (octo word), longjmp on error.
10191 *
10192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10193 * @param iSegReg The index of the segment register to use for
10194 * this access. The base and limits are checked.
10195 * @param GCPtrMem The address of the guest memory.
10196 * @param pu256Value Pointer to the value to store.
10197 */
10198IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10199{
10200 /* The lazy approach for now... */
10201 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10202 pu256Dst->au64[0] = pu256Value->au64[0];
10203 pu256Dst->au64[1] = pu256Value->au64[1];
10204 pu256Dst->au64[2] = pu256Value->au64[2];
10205 pu256Dst->au64[3] = pu256Value->au64[3];
10206 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10207}
10208#endif
10209
10210
10211/**
10212 * Stores a data oword (octo word), AVX aligned.
10213 *
10214 * @returns Strict VBox status code.
10215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10216 * @param iSegReg The index of the segment register to use for
10217 * this access. The base and limits are checked.
10218 * @param GCPtrMem The address of the guest memory.
10219 * @param pu256Value Pointer to the value to store.
10220 */
10221IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10222{
10223 /* The lazy approach for now... */
10224 if (GCPtrMem & 31)
10225 return iemRaiseGeneralProtectionFault0(pVCpu);
10226
10227 PRTUINT256U pu256Dst;
10228 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10229 if (rc == VINF_SUCCESS)
10230 {
10231 pu256Dst->au64[0] = pu256Value->au64[0];
10232 pu256Dst->au64[1] = pu256Value->au64[1];
10233 pu256Dst->au64[2] = pu256Value->au64[2];
10234 pu256Dst->au64[3] = pu256Value->au64[3];
10235 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10236 }
10237 return rc;
10238}
10239
10240
10241#ifdef IEM_WITH_SETJMP
10242/**
10243 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10244 *
10246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10247 * @param iSegReg The index of the segment register to use for
10248 * this access. The base and limits are checked.
10249 * @param GCPtrMem The address of the guest memory.
10250 * @param pu256Value Pointer to the value to store.
10251 */
10252DECL_NO_INLINE(IEM_STATIC, void)
10253iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10254{
10255 /* The lazy approach for now... */
10256 if ((GCPtrMem & 31) == 0)
10257 {
10258 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10259 pu256Dst->au64[0] = pu256Value->au64[0];
10260 pu256Dst->au64[1] = pu256Value->au64[1];
10261 pu256Dst->au64[2] = pu256Value->au64[2];
10262 pu256Dst->au64[3] = pu256Value->au64[3];
10263 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10264 return;
10265 }
10266
10267 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10268 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10269}
10270#endif
10271
10272
10273/**
10274 * Stores a descriptor register (sgdt, sidt).
10275 *
10276 * @returns Strict VBox status code.
10277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10278 * @param cbLimit The limit.
10279 * @param GCPtrBase The base address.
10280 * @param iSegReg The index of the segment register to use for
10281 * this access. The base and limits are checked.
10282 * @param GCPtrMem The address of the guest memory.
10283 */
10284IEM_STATIC VBOXSTRICTRC
10285iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10286{
10287 /*
10288     * The SIDT and SGDT instructions actually store the data using two
10289     * independent writes.  The instructions do not respond to opsize prefixes.
10290 */
10291 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10292 if (rcStrict == VINF_SUCCESS)
10293 {
10294 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10295 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10296 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10297 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10298 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10299 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10300 else
10301 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10302 }
10303 return rcStrict;
10304}
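
/* Worked example with illustrative values (not part of the original code):
   storing cbLimit = 0x037f and GCPtrBase = 0x00400000 while in 16-bit mode
   writes the word 0x037f at GCPtrMem and a dword at GCPtrMem + 2.  For a
   286-or-older target CPU the code above forces the top byte of that dword,
   so 0xff400000 is written; for 386+ targets it is 0x00400000.  In 32-bit
   mode the full 32-bit base is stored, and in 64-bit mode a qword base is
   stored at GCPtrMem + 2. */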
10305
10306
10307/**
10308 * Pushes a word onto the stack.
10309 *
10310 * @returns Strict VBox status code.
10311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10312 * @param u16Value The value to push.
10313 */
10314IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10315{
10316    /* Decrement the stack pointer. */
10317 uint64_t uNewRsp;
10318 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10319
10320 /* Write the word the lazy way. */
10321 uint16_t *pu16Dst;
10322 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10323 if (rc == VINF_SUCCESS)
10324 {
10325 *pu16Dst = u16Value;
10326 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10327 }
10328
10329    /* Commit the new RSP value unless an access handler made trouble. */
10330 if (rc == VINF_SUCCESS)
10331 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10332
10333 return rc;
10334}
10335
10336
10337/**
10338 * Pushes a dword onto the stack.
10339 *
10340 * @returns Strict VBox status code.
10341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10342 * @param u32Value The value to push.
10343 */
10344IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10345{
10346    /* Decrement the stack pointer. */
10347 uint64_t uNewRsp;
10348 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10349
10350 /* Write the dword the lazy way. */
10351 uint32_t *pu32Dst;
10352 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10353 if (rc == VINF_SUCCESS)
10354 {
10355 *pu32Dst = u32Value;
10356 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10357 }
10358
10359    /* Commit the new RSP value unless an access handler made trouble. */
10360 if (rc == VINF_SUCCESS)
10361 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10362
10363 return rc;
10364}
10365
10366
10367/**
10368 * Pushes a dword segment register value onto the stack.
10369 *
10370 * @returns Strict VBox status code.
10371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10372 * @param u32Value The value to push.
10373 */
10374IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10375{
10376    /* Decrement the stack pointer. */
10377 uint64_t uNewRsp;
10378 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10379
10380    /* The intel docs talk about zero extending the selector register
10381       value.  My actual intel CPU here might be zero extending the value,
10382       but it still only writes the lower word... */
10383 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10384     * happens when crossing a page boundary: is the high word checked
10385 * for write accessibility or not? Probably it is. What about segment limits?
10386 * It appears this behavior is also shared with trap error codes.
10387 *
10388     * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10389     * Check on ancient hardware to find out when it actually changed. */
10390 uint16_t *pu16Dst;
10391 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10392 if (rc == VINF_SUCCESS)
10393 {
10394 *pu16Dst = (uint16_t)u32Value;
10395 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10396 }
10397
10398    /* Commit the new RSP value unless an access handler made trouble. */
10399 if (rc == VINF_SUCCESS)
10400 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10401
10402 return rc;
10403}
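
/* Illustrative consequence of the above (example values, not original code):
   a 32-bit push of a segment register with ESP = 0x1000 maps a 4-byte stack
   slot at SS:0x0ffc for read-write, but only the low word of that slot is
   overwritten with the selector value; bytes 0x0ffe..0x0fff keep whatever
   they contained before, and ESP is still lowered by 4 to 0x0ffc on
   success. */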
10404
10405
10406/**
10407 * Pushes a qword onto the stack.
10408 *
10409 * @returns Strict VBox status code.
10410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10411 * @param u64Value The value to push.
10412 */
10413IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10414{
10415    /* Decrement the stack pointer. */
10416 uint64_t uNewRsp;
10417 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10418
10419    /* Write the qword the lazy way. */
10420 uint64_t *pu64Dst;
10421 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10422 if (rc == VINF_SUCCESS)
10423 {
10424 *pu64Dst = u64Value;
10425 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10426 }
10427
10428    /* Commit the new RSP value unless an access handler made trouble. */
10429 if (rc == VINF_SUCCESS)
10430 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10431
10432 return rc;
10433}
10434
10435
10436/**
10437 * Pops a word from the stack.
10438 *
10439 * @returns Strict VBox status code.
10440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10441 * @param pu16Value Where to store the popped value.
10442 */
10443IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10444{
10445 /* Increment the stack pointer. */
10446 uint64_t uNewRsp;
10447 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10448
10449    /* Read the word the lazy way. */
10450 uint16_t const *pu16Src;
10451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10452 if (rc == VINF_SUCCESS)
10453 {
10454 *pu16Value = *pu16Src;
10455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10456
10457 /* Commit the new RSP value. */
10458 if (rc == VINF_SUCCESS)
10459 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10460 }
10461
10462 return rc;
10463}
10464
10465
10466/**
10467 * Pops a dword from the stack.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param pu32Value Where to store the popped value.
10472 */
10473IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10474{
10475 /* Increment the stack pointer. */
10476 uint64_t uNewRsp;
10477 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10478
10479    /* Read the dword the lazy way. */
10480 uint32_t const *pu32Src;
10481 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10482 if (rc == VINF_SUCCESS)
10483 {
10484 *pu32Value = *pu32Src;
10485 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10486
10487 /* Commit the new RSP value. */
10488 if (rc == VINF_SUCCESS)
10489 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10490 }
10491
10492 return rc;
10493}
10494
10495
10496/**
10497 * Pops a qword from the stack.
10498 *
10499 * @returns Strict VBox status code.
10500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10501 * @param pu64Value Where to store the popped value.
10502 */
10503IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10504{
10505 /* Increment the stack pointer. */
10506 uint64_t uNewRsp;
10507 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10508
10509    /* Read the qword the lazy way. */
10510 uint64_t const *pu64Src;
10511 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10512 if (rc == VINF_SUCCESS)
10513 {
10514 *pu64Value = *pu64Src;
10515 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10516
10517 /* Commit the new RSP value. */
10518 if (rc == VINF_SUCCESS)
10519 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10520 }
10521
10522 return rc;
10523}
10524
10525
10526/**
10527 * Pushes a word onto the stack, using a temporary stack pointer.
10528 *
10529 * @returns Strict VBox status code.
10530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10531 * @param u16Value The value to push.
10532 * @param pTmpRsp Pointer to the temporary stack pointer.
10533 */
10534IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10535{
10536    /* Decrement the stack pointer. */
10537 RTUINT64U NewRsp = *pTmpRsp;
10538 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10539
10540 /* Write the word the lazy way. */
10541 uint16_t *pu16Dst;
10542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10543 if (rc == VINF_SUCCESS)
10544 {
10545 *pu16Dst = u16Value;
10546 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10547 }
10548
10549    /* Commit the new RSP value unless an access handler made trouble. */
10550 if (rc == VINF_SUCCESS)
10551 *pTmpRsp = NewRsp;
10552
10553 return rc;
10554}
10555
10556
10557/**
10558 * Pushes a dword onto the stack, using a temporary stack pointer.
10559 *
10560 * @returns Strict VBox status code.
10561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10562 * @param u32Value The value to push.
10563 * @param pTmpRsp Pointer to the temporary stack pointer.
10564 */
10565IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10566{
10567    /* Decrement the stack pointer. */
10568 RTUINT64U NewRsp = *pTmpRsp;
10569 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10570
10571    /* Write the dword the lazy way. */
10572 uint32_t *pu32Dst;
10573 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10574 if (rc == VINF_SUCCESS)
10575 {
10576 *pu32Dst = u32Value;
10577 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10578 }
10579
10580    /* Commit the new RSP value unless an access handler made trouble. */
10581 if (rc == VINF_SUCCESS)
10582 *pTmpRsp = NewRsp;
10583
10584 return rc;
10585}
10586
10587
10588/**
10589 * Pushes a qword onto the stack, using a temporary stack pointer.
10590 *
10591 * @returns Strict VBox status code.
10592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10593 * @param u64Value The value to push.
10594 * @param pTmpRsp Pointer to the temporary stack pointer.
10595 */
10596IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10597{
10598    /* Decrement the stack pointer. */
10599 RTUINT64U NewRsp = *pTmpRsp;
10600 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10601
10602    /* Write the qword the lazy way. */
10603 uint64_t *pu64Dst;
10604 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10605 if (rc == VINF_SUCCESS)
10606 {
10607 *pu64Dst = u64Value;
10608 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10609 }
10610
10611    /* Commit the new RSP value unless an access handler made trouble. */
10612 if (rc == VINF_SUCCESS)
10613 *pTmpRsp = NewRsp;
10614
10615 return rc;
10616}
10617
10618
10619/**
10620 * Pops a word from the stack, using a temporary stack pointer.
10621 *
10622 * @returns Strict VBox status code.
10623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10624 * @param pu16Value Where to store the popped value.
10625 * @param pTmpRsp Pointer to the temporary stack pointer.
10626 */
10627IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10628{
10629 /* Increment the stack pointer. */
10630 RTUINT64U NewRsp = *pTmpRsp;
10631 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10632
10633    /* Read the word the lazy way. */
10634 uint16_t const *pu16Src;
10635 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10636 if (rc == VINF_SUCCESS)
10637 {
10638 *pu16Value = *pu16Src;
10639 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10640
10641 /* Commit the new RSP value. */
10642 if (rc == VINF_SUCCESS)
10643 *pTmpRsp = NewRsp;
10644 }
10645
10646 return rc;
10647}
10648
10649
10650/**
10651 * Pops a dword from the stack, using a temporary stack pointer.
10652 *
10653 * @returns Strict VBox status code.
10654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10655 * @param pu32Value Where to store the popped value.
10656 * @param pTmpRsp Pointer to the temporary stack pointer.
10657 */
10658IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10659{
10660 /* Increment the stack pointer. */
10661 RTUINT64U NewRsp = *pTmpRsp;
10662 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10663
10664    /* Read the dword the lazy way. */
10665 uint32_t const *pu32Src;
10666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10667 if (rc == VINF_SUCCESS)
10668 {
10669 *pu32Value = *pu32Src;
10670 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10671
10672 /* Commit the new RSP value. */
10673 if (rc == VINF_SUCCESS)
10674 *pTmpRsp = NewRsp;
10675 }
10676
10677 return rc;
10678}
10679
10680
10681/**
10682 * Pops a qword from the stack, using a temporary stack pointer.
10683 *
10684 * @returns Strict VBox status code.
10685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10686 * @param pu64Value Where to store the popped value.
10687 * @param pTmpRsp Pointer to the temporary stack pointer.
10688 */
10689IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10690{
10691 /* Increment the stack pointer. */
10692 RTUINT64U NewRsp = *pTmpRsp;
10693 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10694
10695    /* Read the qword the lazy way. */
10696 uint64_t const *pu64Src;
10697 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10698 if (rcStrict == VINF_SUCCESS)
10699 {
10700 *pu64Value = *pu64Src;
10701 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10702
10703 /* Commit the new RSP value. */
10704 if (rcStrict == VINF_SUCCESS)
10705 *pTmpRsp = NewRsp;
10706 }
10707
10708 return rcStrict;
10709}
10710
10711
10712/**
10713 * Begin a special stack push (used by interrupts, exceptions and such).
10714 *
10715 * This will raise \#SS or \#PF if appropriate.
10716 *
10717 * @returns Strict VBox status code.
10718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10719 * @param cbMem The number of bytes to push onto the stack.
10720 * @param ppvMem Where to return the pointer to the stack memory.
10721 * As with the other memory functions this could be
10722 * direct access or bounce buffered access, so
10723 * don't commit register changes until the commit call
10724 * succeeds.
10725 * @param puNewRsp Where to return the new RSP value. This must be
10726 * passed unchanged to
10727 * iemMemStackPushCommitSpecial().
10728 */
10729IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10730{
10731 Assert(cbMem < UINT8_MAX);
10732 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10733 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10734}
10735
10736
10737/**
10738 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10739 *
10740 * This will update the rSP.
10741 *
10742 * @returns Strict VBox status code.
10743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10744 * @param pvMem The pointer returned by
10745 * iemMemStackPushBeginSpecial().
10746 * @param uNewRsp The new RSP value returned by
10747 * iemMemStackPushBeginSpecial().
10748 */
10749IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10750{
10751 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10752 if (rcStrict == VINF_SUCCESS)
10753 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10754 return rcStrict;
10755}
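
/*
 * Illustrative sketch only: how the special push begin/commit pair is meant to
 * be used.  The 8-byte frame and uValueToPush are made-up placeholders; real
 * callers build interrupt/exception frames here.
 *
 * @code
 *      uint64_t      uNewRsp;
 *      uint64_t     *pu64Frame;
 *      VBOXSTRICTRC  rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pu64Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu64Frame = uValueToPush;  // write via the mapping; may be direct or bounce buffered
 *      return iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp); // unmaps and updates RSP on success
 * @endcode
 */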
10756
10757
10758/**
10759 * Begin a special stack pop (used by iret, retf and such).
10760 *
10761 * This will raise \#SS or \#PF if appropriate.
10762 *
10763 * @returns Strict VBox status code.
10764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10765 * @param cbMem The number of bytes to pop from the stack.
10766 * @param ppvMem Where to return the pointer to the stack memory.
10767 * @param puNewRsp Where to return the new RSP value. This must be
10768 * assigned to CPUMCTX::rsp manually some time
10769 * after iemMemStackPopDoneSpecial() has been
10770 * called.
10771 */
10772IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10773{
10774 Assert(cbMem < UINT8_MAX);
10775 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10776 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10777}
10778
10779
10780/**
10781 * Continue a special stack pop (used by iret and retf).
10782 *
10783 * This will raise \#SS or \#PF if appropriate.
10784 *
10785 * @returns Strict VBox status code.
10786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10787 * @param cbMem The number of bytes to pop from the stack.
10788 * @param ppvMem Where to return the pointer to the stack memory.
10789 * @param puNewRsp Where to return the new RSP value. This must be
10790 * assigned to CPUMCTX::rsp manually some time
10791 * after iemMemStackPopDoneSpecial() has been
10792 * called.
10793 */
10794IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10795{
10796 Assert(cbMem < UINT8_MAX);
10797 RTUINT64U NewRsp;
10798 NewRsp.u = *puNewRsp;
10799 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10800 *puNewRsp = NewRsp.u;
10801 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10802}
10803
10804
10805/**
10806 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10807 * iemMemStackPopContinueSpecial).
10808 *
10809 * The caller will manually commit the rSP.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param pvMem The pointer returned by
10814 * iemMemStackPopBeginSpecial() or
10815 * iemMemStackPopContinueSpecial().
10816 */
10817IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10818{
10819 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10820}
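
/*
 * Illustrative sketch only: the begin/done pop helpers chained the way an
 * iret/retf style caller might use them.  The frame layout and size are made
 * up; note that RSP is deliberately NOT updated by the done call.
 *
 * @code
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uEip    = pu32Frame[0];
 *      uint32_t const uCs     = pu32Frame[1];
 *      uint32_t const uEflags = pu32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... validate uCs/uEflags and such, then commit the stack pointer manually:
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;
 * @endcode
 */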
10821
10822
10823/**
10824 * Fetches a system table byte.
10825 *
10826 * @returns Strict VBox status code.
10827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10828 * @param pbDst Where to return the byte.
10829 * @param iSegReg The index of the segment register to use for
10830 * this access. The base and limits are checked.
10831 * @param GCPtrMem The address of the guest memory.
10832 */
10833IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10834{
10835 /* The lazy approach for now... */
10836 uint8_t const *pbSrc;
10837 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10838 if (rc == VINF_SUCCESS)
10839 {
10840 *pbDst = *pbSrc;
10841 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10842 }
10843 return rc;
10844}
10845
10846
10847/**
10848 * Fetches a system table word.
10849 *
10850 * @returns Strict VBox status code.
10851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10852 * @param pu16Dst Where to return the word.
10853 * @param iSegReg The index of the segment register to use for
10854 * this access. The base and limits are checked.
10855 * @param GCPtrMem The address of the guest memory.
10856 */
10857IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10858{
10859 /* The lazy approach for now... */
10860 uint16_t const *pu16Src;
10861 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10862 if (rc == VINF_SUCCESS)
10863 {
10864 *pu16Dst = *pu16Src;
10865 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10866 }
10867 return rc;
10868}
10869
10870
10871/**
10872 * Fetches a system table dword.
10873 *
10874 * @returns Strict VBox status code.
10875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10876 * @param pu32Dst Where to return the dword.
10877 * @param iSegReg The index of the segment register to use for
10878 * this access. The base and limits are checked.
10879 * @param GCPtrMem The address of the guest memory.
10880 */
10881IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10882{
10883 /* The lazy approach for now... */
10884 uint32_t const *pu32Src;
10885 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10886 if (rc == VINF_SUCCESS)
10887 {
10888 *pu32Dst = *pu32Src;
10889 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10890 }
10891 return rc;
10892}
10893
10894
10895/**
10896 * Fetches a system table qword.
10897 *
10898 * @returns Strict VBox status code.
10899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10900 * @param pu64Dst Where to return the qword.
10901 * @param iSegReg The index of the segment register to use for
10902 * this access. The base and limits are checked.
10903 * @param GCPtrMem The address of the guest memory.
10904 */
10905IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10906{
10907 /* The lazy approach for now... */
10908 uint64_t const *pu64Src;
10909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10910 if (rc == VINF_SUCCESS)
10911 {
10912 *pu64Dst = *pu64Src;
10913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10914 }
10915 return rc;
10916}
10917
10918
10919/**
10920 * Fetches a descriptor table entry with caller specified error code.
10921 *
10922 * @returns Strict VBox status code.
10923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10924 * @param pDesc Where to return the descriptor table entry.
10925 * @param uSel The selector which table entry to fetch.
10926 * @param uXcpt The exception to raise on table lookup error.
10927 * @param uErrorCode The error code associated with the exception.
10928 */
10929IEM_STATIC VBOXSTRICTRC
10930iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10931{
10932 AssertPtr(pDesc);
10933 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10934
10935 /** @todo did the 286 require all 8 bytes to be accessible? */
10936 /*
10937 * Get the selector table base and check bounds.
10938 */
10939 RTGCPTR GCPtrBase;
10940 if (uSel & X86_SEL_LDT)
10941 {
10942 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10943 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10944 {
10945 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10946 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10947 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10948 uErrorCode, 0);
10949 }
10950
10951 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10952 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10953 }
10954 else
10955 {
10956 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10957 {
10958 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10959 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10960 uErrorCode, 0);
10961 }
10962 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10963 }
10964
10965 /*
10966 * Read the legacy descriptor and maybe the long mode extensions if
10967 * required.
10968 */
10969 VBOXSTRICTRC rcStrict;
10970 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10971 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10972 else
10973 {
10974 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10975 if (rcStrict == VINF_SUCCESS)
10976 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10977 if (rcStrict == VINF_SUCCESS)
10978 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10979 if (rcStrict == VINF_SUCCESS)
10980 pDesc->Legacy.au16[3] = 0;
10981 else
10982 return rcStrict;
10983 }
10984
10985 if (rcStrict == VINF_SUCCESS)
10986 {
10987 if ( !IEM_IS_LONG_MODE(pVCpu)
10988 || pDesc->Legacy.Gen.u1DescType)
10989 pDesc->Long.au64[1] = 0;
10990 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10991 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10992 else
10993 {
10994 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10995 /** @todo is this the right exception? */
10996 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10997 }
10998 }
10999 return rcStrict;
11000}
11001
11002
11003/**
11004 * Fetches a descriptor table entry.
11005 *
11006 * @returns Strict VBox status code.
11007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11008 * @param pDesc Where to return the descriptor table entry.
11009 * @param uSel The selector which table entry to fetch.
11010 * @param uXcpt The exception to raise on table lookup error.
11011 */
11012IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11013{
11014 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11015}
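
/*
 * Illustrative sketch only: a typical descriptor lookup as a far-branch style
 * caller might do it, raising \#GP with the selector as error code on failure.
 * The uNewCs variable is a placeholder.
 *
 * @code
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... check Desc.Legacy.Gen.u1Present, the type, the DPL and so on ...
 * @endcode
 */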
11016
11017
11018/**
11019 * Fakes a long mode stack segment descriptor for SS = 0.
11020 *
11021 * @param pDescSs Where to return the fake stack descriptor.
11022 * @param uDpl The DPL we want.
11023 */
11024IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11025{
11026 pDescSs->Long.au64[0] = 0;
11027 pDescSs->Long.au64[1] = 0;
11028 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11029 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11030 pDescSs->Long.Gen.u2Dpl = uDpl;
11031 pDescSs->Long.Gen.u1Present = 1;
11032 pDescSs->Long.Gen.u1Long = 1;
11033}
11034
11035
11036/**
11037 * Marks the selector descriptor as accessed (only non-system descriptors).
11038 *
11039 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11040 * will therefore skip the limit checks.
11041 *
11042 * @returns Strict VBox status code.
11043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11044 * @param uSel The selector.
11045 */
11046IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11047{
11048 /*
11049 * Get the selector table base and calculate the entry address.
11050 */
11051 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11052 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11053 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11054 GCPtr += uSel & X86_SEL_MASK;
11055
11056 /*
11057 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11058 * ugly stuff to avoid that. This also makes sure the access is atomic
11059 * and more or less removes any question about 8-bit vs 32-bit accesses.
11060 */
11061 VBOXSTRICTRC rcStrict;
11062 uint32_t volatile *pu32;
11063 if ((GCPtr & 3) == 0)
11064 {
11065 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11066 GCPtr += 2 + 2;
11067 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11068 if (rcStrict != VINF_SUCCESS)
11069 return rcStrict;
11070 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11071 }
11072 else
11073 {
11074 /* The misaligned GDT/LDT case, map the whole thing. */
11075 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11076 if (rcStrict != VINF_SUCCESS)
11077 return rcStrict;
11078 switch ((uintptr_t)pu32 & 3)
11079 {
11080 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11081 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11082 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11083 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11084 }
11085 }
11086
11087 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11088}
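
/*
 * Illustrative sketch only: once a descriptor has been fetched and validated,
 * the accessed bit is typically set like this before loading the segment
 * register (uNewCs and the earlier fetched Desc are placeholders):
 *
 * @code
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED; // keep the local copy in sync
 *      }
 * @endcode
 */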
11089
11090/** @} */
11091
11092
11093/*
11094 * Include the C/C++ implementation of instruction.
11095 */
11096#include "IEMAllCImpl.cpp.h"
11097
11098
11099
11100/** @name "Microcode" macros.
11101 *
11102 * The idea is that we should be able to use the same code both to interpret
11103 * instructions and to generate recompiler code. Thus this obfuscation.
11104 *
11105 * @{
11106 */
11107#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11108#define IEM_MC_END() }
11109#define IEM_MC_PAUSE() do {} while (0)
11110#define IEM_MC_CONTINUE() do {} while (0)
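
/*
 * Illustrative sketch only: a decoder body composed from these statement
 * macros.  When interpreting, they expand to the plain C defined in this
 * section; a recompiler could expand the same statements differently.  The
 * instruction shown (a 16-bit register-to-register move) and the iGRegSrc /
 * iGRegDst indices are just example placeholders.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *      IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */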
11111
11112/** Internal macro. */
11113#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11114 do \
11115 { \
11116 VBOXSTRICTRC rcStrict2 = a_Expr; \
11117 if (rcStrict2 != VINF_SUCCESS) \
11118 return rcStrict2; \
11119 } while (0)
11120
11121
11122#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11123#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11124#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11125#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11126#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11127#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11128#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11129#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11130#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11131 do { \
11132 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11133 return iemRaiseDeviceNotAvailable(pVCpu); \
11134 } while (0)
11135#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11136 do { \
11137 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11138 return iemRaiseDeviceNotAvailable(pVCpu); \
11139 } while (0)
11140#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11141 do { \
11142 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11143 return iemRaiseMathFault(pVCpu); \
11144 } while (0)
11145#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11146 do { \
11147 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11148 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11149 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11150 return iemRaiseUndefinedOpcode(pVCpu); \
11151 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11152 return iemRaiseDeviceNotAvailable(pVCpu); \
11153 } while (0)
11154#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11155 do { \
11156 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11157 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11158 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11159 return iemRaiseUndefinedOpcode(pVCpu); \
11160 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11161 return iemRaiseDeviceNotAvailable(pVCpu); \
11162 } while (0)
11163#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11164 do { \
11165 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11166 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11167 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11168 return iemRaiseUndefinedOpcode(pVCpu); \
11169 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11170 return iemRaiseDeviceNotAvailable(pVCpu); \
11171 } while (0)
11172#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11173 do { \
11174 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11175 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11176 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11177 return iemRaiseUndefinedOpcode(pVCpu); \
11178 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11179 return iemRaiseDeviceNotAvailable(pVCpu); \
11180 } while (0)
11181#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11182 do { \
11183 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11184 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11185 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11186 return iemRaiseUndefinedOpcode(pVCpu); \
11187 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11188 return iemRaiseDeviceNotAvailable(pVCpu); \
11189 } while (0)
11190#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11191 do { \
11192 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11193 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11194 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11195 return iemRaiseUndefinedOpcode(pVCpu); \
11196 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11197 return iemRaiseDeviceNotAvailable(pVCpu); \
11198 } while (0)
11199#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11200 do { \
11201 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11202 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11203 return iemRaiseUndefinedOpcode(pVCpu); \
11204 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11205 return iemRaiseDeviceNotAvailable(pVCpu); \
11206 } while (0)
11207#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11208 do { \
11209 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11210 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11211 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11212 return iemRaiseUndefinedOpcode(pVCpu); \
11213 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11214 return iemRaiseDeviceNotAvailable(pVCpu); \
11215 } while (0)
11216#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11217 do { \
11218 if (pVCpu->iem.s.uCpl != 0) \
11219 return iemRaiseGeneralProtectionFault0(pVCpu); \
11220 } while (0)
11221#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11222 do { \
11223 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11224 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11225 } while (0)
11226#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11227 do { \
11228 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11229 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11230 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11231 return iemRaiseUndefinedOpcode(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11234 do { \
11235 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11236 return iemRaiseGeneralProtectionFault0(pVCpu); \
11237 } while (0)
11238
11239
11240#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11241#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11242#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11243#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11244#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11245#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11246#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11247 uint32_t a_Name; \
11248 uint32_t *a_pName = &a_Name
11249#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11250 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11251
11252#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11253#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11254
11255#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11256#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11257#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11258#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11268#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11269#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11270#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11271#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11272#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11273 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11274 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11275 } while (0)
11276#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11277 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11278 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11279 } while (0)
11280#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11281 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11282 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11283 } while (0)
11284/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11285#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11286 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11287 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11288 } while (0)
11289#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11290 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11291 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11292 } while (0)
11293/** @note Not for IOPL or IF testing or modification. */
11294#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11295#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11296#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11297#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11298
11299#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11300#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11301#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11302#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11303#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11304#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11305#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11306#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11307#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11308#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11309/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11310#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11311 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11312 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11313 } while (0)
11314#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11315 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11316 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11317 } while (0)
11318#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11319 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11320
11321
11322#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11323#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11324/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11325 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11326#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11327#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11328/** @note Not for IOPL or IF testing or modification. */
11329#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11330
11331#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11332#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11333#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11334 do { \
11335 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11336 *pu32Reg += (a_u32Value); \
11337        pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11338 } while (0)
11339#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11340
11341#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11342#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11343#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11344 do { \
11345 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11346 *pu32Reg -= (a_u32Value); \
11347        pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11348 } while (0)
11349#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11350#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11351
11352#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11353#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11354#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11355#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11356#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11357#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11358#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11359
11360#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11361#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11362#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11363#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11364
11365#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11366#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11367#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11368
11369#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11370#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11371#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11372
11373#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11374#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11375#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11376
11377#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11378#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11379#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11380
11381#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11382
11383#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11384
11385#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11386#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11387#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11388 do { \
11389 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11390 *pu32Reg &= (a_u32Value); \
11391        pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11392 } while (0)
11393#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11394
11395#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11396#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11397#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11398 do { \
11399 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11400 *pu32Reg |= (a_u32Value); \
11401        pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11402 } while (0)
11403#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11404
11405
11406/** @note Not for IOPL or IF modification. */
11407#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11408/** @note Not for IOPL or IF modification. */
11409#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11410/** @note Not for IOPL or IF modification. */
11411#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11412
11413#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11414
11415/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11416#define IEM_MC_FPU_TO_MMX_MODE() do { \
11417 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11418 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11419 } while (0)
11420
11421/** Switches the FPU state from MMX mode (FTW=0xffff). */
11422#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11423 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11424 } while (0)
11425
11426#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11427 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11428#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11429 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11430#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11431 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11432 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11433 } while (0)
11434#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11435 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11436 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11437 } while (0)
11438#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11439 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11440#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11441 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11442#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11443 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11444
11445#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11446 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11447 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11448 } while (0)
11449#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11450 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11451#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11452 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11453#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11454 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11455#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11456 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11457 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11458 } while (0)
11459#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11460 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11461#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11462 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11463 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11464 } while (0)
11465#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11466 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11467#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11468 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11469 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11470 } while (0)
11471#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11472 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11473#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11474 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11475#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11476 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11477#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11478 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11479#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11480 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11481 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11482 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11483 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11484 } while (0)
11485
11486#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11487 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11488 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11489 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11490 } while (0)
11491#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11492 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11493 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11494 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11495 } while (0)
11496#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11497 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11498 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11499 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11500 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11501 } while (0)
11502#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11503 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11504 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11505 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11506 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11507 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11508 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11509 } while (0)
11510
11511#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11512#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11513 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11514 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11515 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11516 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11518 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11520 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11521 } while (0)
11522#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11523 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11524 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11525 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11526 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11527 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11528 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11529 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11530 } while (0)
11531#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11532 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11533 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11534 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11535 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11536 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11537 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11538 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11539 } while (0)
11540#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11541 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11542 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11543 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11544 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11545 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11546 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11547 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11548 } while (0)
11549
11550#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11551 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11552#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11553 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11554#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11555 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11556#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11557 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11558 uintptr_t const iYRegTmp = (a_iYReg); \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11561 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11562 } while (0)
11563
11564#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11565 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11566 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11567 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11569 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11571 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11572 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11573 } while (0)
11574#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11575 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11576 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11577 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11578 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11579 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11580 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11581 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11582 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11583 } while (0)
11584#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11585 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11586 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11587 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11588 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11589 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11590 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11591 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11592 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11593 } while (0)
11594
11595#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11596 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11597 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11598 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11599 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11600 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11601 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11602 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11603 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11604 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11605 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11606 } while (0)
11607#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11608 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11609 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11610 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11611 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11612 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11613 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11614 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11615 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11616 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11617 } while (0)
11618#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11619 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11620 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11621 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11622 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11623 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11625 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11626 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11627 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11628 } while (0)
11629#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11630 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11631 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11632 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11633 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11634 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11635 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11636 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11637 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11638 } while (0)
11639
11640#ifndef IEM_WITH_SETJMP
11641# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11643# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11645# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11647#else
11648# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11649 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11651 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11652# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11653 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11654#endif
11655
11656#ifndef IEM_WITH_SETJMP
11657# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11659# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11661# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11663#else
11664# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11665 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11667 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11668# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11669 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11670#endif
11671
11672#ifndef IEM_WITH_SETJMP
11673# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11675# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11677# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11679#else
11680# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11683 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11684# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11685 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11686#endif
11687
11688#ifdef SOME_UNUSED_FUNCTION
11689# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11691#endif
11692
11693#ifndef IEM_WITH_SETJMP
11694# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11698# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11700# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11702#else
11703# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11706 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11707# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11708 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11709# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11710 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11711#endif
11712
11713#ifndef IEM_WITH_SETJMP
11714# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11716# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11718# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11720#else
11721# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11722 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11723# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11724 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11726 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11727#endif
11728
11729#ifndef IEM_WITH_SETJMP
11730# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11734#else
11735# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11736 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11737# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11738 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11739#endif
11740
11741#ifndef IEM_WITH_SETJMP
11742# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11743 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11746#else
11747# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11748 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11749# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11750 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11751#endif
11752
11753
11754
11755#ifndef IEM_WITH_SETJMP
11756# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11757 do { \
11758 uint8_t u8Tmp; \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11760 (a_u16Dst) = u8Tmp; \
11761 } while (0)
11762# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11763 do { \
11764 uint8_t u8Tmp; \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11766 (a_u32Dst) = u8Tmp; \
11767 } while (0)
11768# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint8_t u8Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u64Dst) = u8Tmp; \
11773 } while (0)
11774# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11775 do { \
11776 uint16_t u16Tmp; \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11778 (a_u32Dst) = u16Tmp; \
11779 } while (0)
11780# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint16_t u16Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u64Dst) = u16Tmp; \
11785 } while (0)
11786# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11787 do { \
11788 uint32_t u32Tmp; \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11790 (a_u64Dst) = u32Tmp; \
11791 } while (0)
11792#else /* IEM_WITH_SETJMP */
11793# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11794 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11796 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11797# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11798 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11799# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11800 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11801# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11802 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11803# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11804 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11805#endif /* IEM_WITH_SETJMP */
11806
11807#ifndef IEM_WITH_SETJMP
11808# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint8_t u8Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u16Dst) = (int8_t)u8Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint8_t u8Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u32Dst) = (int8_t)u8Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint8_t u8Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u64Dst) = (int8_t)u8Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint16_t u16Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u32Dst) = (int16_t)u16Tmp; \
11831 } while (0)
11832# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11833 do { \
11834 uint16_t u16Tmp; \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11836 (a_u64Dst) = (int16_t)u16Tmp; \
11837 } while (0)
11838# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11839 do { \
11840 uint32_t u32Tmp; \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11842 (a_u64Dst) = (int32_t)u32Tmp; \
11843 } while (0)
11844#else /* IEM_WITH_SETJMP */
11845# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11848 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11849# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11851# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11852 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11853# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11854 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11855# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11856 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11857#endif /* IEM_WITH_SETJMP */
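/* Illustrative usage sketch (u32Value, GCPtrEffSrc and bRm are assumed locals of the
 * surrounding IEM_MC block): a movzx style instruction body pairs one of the ZX
 * fetchers above with a general register store, e.g.
 *
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg,
 *                            u32Value);
 */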
11858
11859#ifndef IEM_WITH_SETJMP
11860# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11862# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11864# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11866# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11868#else
11869# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11870 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11871# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11872 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11873# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11874 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11875# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11876 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11877#endif
11878
11879#ifndef IEM_WITH_SETJMP
11880# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11882# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11884# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11885 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11886# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11888#else
11889# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11890 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11891# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11892 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11893# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11894 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11895# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11896 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11897#endif
11898
11899#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11900#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11901#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11902#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11903#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11904#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11905#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11906 do { \
11907 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11908 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11909 } while (0)
11910
11911#ifndef IEM_WITH_SETJMP
11912# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11913 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11914# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11915 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11916#else
11917# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11918 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11919# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11920 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11921#endif
11922
11923#ifndef IEM_WITH_SETJMP
11924# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11926# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11928#else
11929# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11930 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11931# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11932 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11933#endif
11934
11935
11936#define IEM_MC_PUSH_U16(a_u16Value) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11938#define IEM_MC_PUSH_U32(a_u32Value) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11940#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11941 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11942#define IEM_MC_PUSH_U64(a_u64Value) \
11943 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11944
11945#define IEM_MC_POP_U16(a_pu16Value) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11947#define IEM_MC_POP_U32(a_pu32Value) \
11948 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11949#define IEM_MC_POP_U64(a_pu64Value) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
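/* Illustrative usage sketch (u16Value and GCPtrEffSrc are assumed locals): a
 * PUSH r/m16 style body fetches the operand and hands it to the stack helper:
 *
 *      IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 */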
11951
11952/** Maps guest memory for direct or bounce buffered access.
11953 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11954 * @remarks May return.
11955 */
11956#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11958
11959/** Maps guest memory for direct or bounce buffered access.
11960 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11961 * @remarks May return.
11962 */
11963#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11965
11966/** Commits the memory and unmaps the guest memory.
11967 * @remarks May return.
11968 */
11969#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11970 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11971
11972/** Commits the memory and unmaps the guest memory, unless the FPU status word
11973 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11974 * would cause FLD not to store.
11975 *
11976 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11977 * store, while \#P will not.
11978 *
11979 * @remarks May in theory return - for now.
11980 */
11981#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11982 do { \
11983 if ( !(a_u16FSW & X86_FSW_ES) \
11984 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11985 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11987 } while (0)
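/* Illustrative usage sketch (pu16Dst, u16Src, pEFlags, GCPtrEffDst and pfnAImpl are
 * assumed to be set up by the surrounding IEM_MC block): a read-modify-write memory
 * operand is mapped, operated on and then committed:
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnAImpl, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */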
11988
11989/** Calculates the effective address from the ModR/M byte. */
11990#ifndef IEM_WITH_SETJMP
11991# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11992 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11993#else
11994# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11995 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11996#endif
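/* Illustrative usage sketch (bRm is the ModR/M byte fetched by the decoder): the
 * memory operand is typically resolved right after reading ModR/M, the 0 meaning
 * that no immediate bytes follow the effective address encoding:
 *
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 */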
11997
11998#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11999#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12000#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12001#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12002#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12003#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12004#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12005
12006/**
12007 * Defers the rest of the instruction emulation to a C implementation routine
12008 * and returns, only taking the standard parameters.
12009 *
12010 * @param a_pfnCImpl The pointer to the C routine.
12011 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12012 */
12013#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12014
12015/**
12016 * Defers the rest of instruction emulation to a C implementation routine and
12017 * returns, taking one argument in addition to the standard ones.
12018 *
12019 * @param a_pfnCImpl The pointer to the C routine.
12020 * @param a0 The argument.
12021 */
12022#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12023
12024/**
12025 * Defers the rest of the instruction emulation to a C implementation routine
12026 * and returns, taking two arguments in addition to the standard ones.
12027 *
12028 * @param a_pfnCImpl The pointer to the C routine.
12029 * @param a0 The first extra argument.
12030 * @param a1 The second extra argument.
12031 */
12032#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12033
12034/**
12035 * Defers the rest of the instruction emulation to a C implementation routine
12036 * and returns, taking three arguments in addition to the standard ones.
12037 *
12038 * @param a_pfnCImpl The pointer to the C routine.
12039 * @param a0 The first extra argument.
12040 * @param a1 The second extra argument.
12041 * @param a2 The third extra argument.
12042 */
12043#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12044
12045/**
12046 * Defers the rest of the instruction emulation to a C implementation routine
12047 * and returns, taking four arguments in addition to the standard ones.
12048 *
12049 * @param a_pfnCImpl The pointer to the C routine.
12050 * @param a0 The first extra argument.
12051 * @param a1 The second extra argument.
12052 * @param a2 The third extra argument.
12053 * @param a3 The fourth extra argument.
12054 */
12055#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12056
12057/**
12058 * Defers the rest of the instruction emulation to a C implementation routine
12059 * and returns, taking five arguments in addition to the standard ones.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @param a0 The first extra argument.
12063 * @param a1 The second extra argument.
12064 * @param a2 The third extra argument.
12065 * @param a3 The fourth extra argument.
12066 * @param a4 The fifth extra argument.
12067 */
12068#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12069
12070/**
12071 * Defers the entire instruction emulation to a C implementation routine and
12072 * returns, only taking the standard parameters.
12073 *
12074 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12075 *
12076 * @param a_pfnCImpl The pointer to the C routine.
12077 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12078 */
12079#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12080
12081/**
12082 * Defers the entire instruction emulation to a C implementation routine and
12083 * returns, taking one argument in addition to the standard ones.
12084 *
12085 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12086 *
12087 * @param a_pfnCImpl The pointer to the C routine.
12088 * @param a0 The argument.
12089 */
12090#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12091
12092/**
12093 * Defers the entire instruction emulation to a C implementation routine and
12094 * returns, taking two arguments in addition to the standard ones.
12095 *
12096 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12097 *
12098 * @param a_pfnCImpl The pointer to the C routine.
12099 * @param a0 The first extra argument.
12100 * @param a1 The second extra argument.
12101 */
12102#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12103
12104/**
12105 * Defers the entire instruction emulation to a C implementation routine and
12106 * returns, taking three arguments in addition to the standard ones.
12107 *
12108 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12109 *
12110 * @param a_pfnCImpl The pointer to the C routine.
12111 * @param a0 The first extra argument.
12112 * @param a1 The second extra argument.
12113 * @param a2 The third extra argument.
12114 */
12115#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
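/* Illustrative sketch of deferring to a C implementation; this is roughly how a
 * simple no-operand instruction such as HLT is wired up (iemOp_hlt / iemCImpl_hlt
 * live in other source files):
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */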
12116
12117/**
12118 * Calls an FPU assembly implementation taking one visible argument.
12119 *
12120 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12121 * @param a0 The first extra argument.
12122 */
12123#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12124 do { \
12125 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12126 } while (0)
12127
12128/**
12129 * Calls an FPU assembly implementation taking two visible arguments.
12130 *
12131 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12132 * @param a0 The first extra argument.
12133 * @param a1 The second extra argument.
12134 */
12135#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12136 do { \
12137 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12138 } while (0)
12139
12140/**
12141 * Calls an FPU assembly implementation taking three visible arguments.
12142 *
12143 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12144 * @param a0 The first extra argument.
12145 * @param a1 The second extra argument.
12146 * @param a2 The third extra argument.
12147 */
12148#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12149 do { \
12150 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12151 } while (0)
12152
12153#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12154 do { \
12155 (a_FpuData).FSW = (a_FSW); \
12156 (a_FpuData).r80Result = *(a_pr80Value); \
12157 } while (0)
12158
12159/** Pushes FPU result onto the stack. */
12160#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12161 iemFpuPushResult(pVCpu, &a_FpuData)
12162/** Pushes FPU result onto the stack and sets the FPUDP. */
12163#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12164 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12165
12166/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12167#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12168 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12169
12170/** Stores FPU result in a stack register. */
12171#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12172 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12173/** Stores FPU result in a stack register and pops the stack. */
12174#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12175 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12176/** Stores FPU result in a stack register and sets the FPUDP. */
12177#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12178 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12179/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12180 * stack. */
12181#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12182 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
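/* Illustrative usage sketch (pfnAImpl, pFpuRes/FpuRes, pr80Value1, pr80Value2 and
 * iStReg are assumed IEM_MC arguments/locals): an ST(0) op ST(i) style instruction
 * combines the AIMPL call with the result macros above:
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */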
12183
12184/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12185#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12186 iemFpuUpdateOpcodeAndIp(pVCpu)
12187/** Free a stack register (for FFREE and FFREEP). */
12188#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12189 iemFpuStackFree(pVCpu, a_iStReg)
12190/** Increment the FPU stack pointer. */
12191#define IEM_MC_FPU_STACK_INC_TOP() \
12192 iemFpuStackIncTop(pVCpu)
12193/** Decrement the FPU stack pointer. */
12194#define IEM_MC_FPU_STACK_DEC_TOP() \
12195 iemFpuStackDecTop(pVCpu)
12196
12197/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12198#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12199 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12200/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12201#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12202 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12203/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12204#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12205 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12206/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12207#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12208 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12209/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12210 * stack. */
12211#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12212 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12213/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12214#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12215 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12216
12217/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12218#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12219 iemFpuStackUnderflow(pVCpu, a_iStDst)
12220/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12221 * stack. */
12222#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12223 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12224/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12225 * FPUDS. */
12226#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12227 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12228/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12229 * FPUDS. Pops stack. */
12230#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12231 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12232/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12233 * stack twice. */
12234#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12235 iemFpuStackUnderflowThenPopPop(pVCpu)
12236/** Raises an FPU stack underflow exception for an instruction pushing a result
12237 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12238#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12239 iemFpuStackPushUnderflow(pVCpu)
12240/** Raises an FPU stack underflow exception for an instruction pushing a result
12241 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12242#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12243 iemFpuStackPushUnderflowTwo(pVCpu)
12244
12245/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12246 * FPUIP, FPUCS and FOP. */
12247#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12248 iemFpuStackPushOverflow(pVCpu)
12249/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12250 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12251#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12252 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12253/** Prepares for using the FPU state.
12254 * Ensures that we can use the host FPU in the current context (RC+R0).
12255 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12256#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12257/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12258#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12259/** Actualizes the guest FPU state so it can be accessed and modified. */
12260#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12261
12262/** Prepares for using the SSE state.
12263 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12264 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12265#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12266/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12267#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12268/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12269#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12270
12271/** Prepares for using the AVX state.
12272 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12273 * Ensures the guest AVX state in the CPUMCTX is up to date.
12274 * @note This will include the AVX512 state too when support for it is added
12275 *       due to the zero extending feature of VEX instructions. */
12276#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12277/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12278#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12279/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12280#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12281
12282/**
12283 * Calls an MMX assembly implementation taking two visible arguments.
12284 *
12285 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12286 * @param a0 The first extra argument.
12287 * @param a1 The second extra argument.
12288 */
12289#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12290 do { \
12291 IEM_MC_PREPARE_FPU_USAGE(); \
12292 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12293 } while (0)
12294
12295/**
12296 * Calls an MMX assembly implementation taking three visible arguments.
12297 *
12298 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12299 * @param a0 The first extra argument.
12300 * @param a1 The second extra argument.
12301 * @param a2 The third extra argument.
12302 */
12303#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12304 do { \
12305 IEM_MC_PREPARE_FPU_USAGE(); \
12306 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12307 } while (0)
12308
12309
12310/**
12311 * Calls an SSE assembly implementation taking two visible arguments.
12312 *
12313 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12314 * @param a0 The first extra argument.
12315 * @param a1 The second extra argument.
12316 */
12317#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12318 do { \
12319 IEM_MC_PREPARE_SSE_USAGE(); \
12320 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12321 } while (0)
12322
12323/**
12324 * Calls an SSE assembly implementation taking three visible arguments.
12325 *
12326 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12327 * @param a0 The first extra argument.
12328 * @param a1 The second extra argument.
12329 * @param a2 The third extra argument.
12330 */
12331#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12332 do { \
12333 IEM_MC_PREPARE_SSE_USAGE(); \
12334 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12335 } while (0)
12336
12337
12338/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12339 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12340#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12341 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12342
12343/**
12344 * Calls an AVX assembly implementation taking two visible arguments.
12345 *
12346 * There is one implicit zeroth argument, a pointer to the extended state.
12347 *
12348 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12349 * @param a1 The first extra argument.
12350 * @param a2 The second extra argument.
12351 */
12352#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12353 do { \
12354 IEM_MC_PREPARE_AVX_USAGE(); \
12355 a_pfnAImpl(pXState, (a1), (a2)); \
12356 } while (0)
12357
12358/**
12359 * Calls an AVX assembly implementation taking three visible arguments.
12360 *
12361 * There is one implicit zeroth argument, a pointer to the extended state.
12362 *
12363 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12364 * @param a1 The first extra argument.
12365 * @param a2 The second extra argument.
12366 * @param a3 The third extra argument.
12367 */
12368#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12369 do { \
12370 IEM_MC_PREPARE_AVX_USAGE(); \
12371 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12372 } while (0)
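/* Illustrative usage sketch (pfnAImpl, puDst and puSrc are assumed IEM_MC
 * arguments): the implicit extended-state pointer occupies argument zero, so the
 * visible arguments are declared starting at index 1:
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT128U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, puDst, puSrc);
 */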
12373
12374/** @note Not for IOPL or IF testing. */
12375#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12378/** @note Not for IOPL or IF testing. */
12379#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12380/** @note Not for IOPL or IF testing. */
12381#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12382/** @note Not for IOPL or IF testing. */
12383#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12384 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12385 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12386/** @note Not for IOPL or IF testing. */
12387#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12388 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12389 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12390/** @note Not for IOPL or IF testing. */
12391#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12392 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12393 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12394 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12395/** @note Not for IOPL or IF testing. */
12396#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12397 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12398 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12399 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12400#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12401#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12402#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12403/** @note Not for IOPL or IF testing. */
12404#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12405 if ( pVCpu->cpum.GstCtx.cx != 0 \
12406 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12407/** @note Not for IOPL or IF testing. */
12408#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12409 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12410 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12413 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12414 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12417 if ( pVCpu->cpum.GstCtx.cx != 0 \
12418 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12421 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12422 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12425 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12426 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12427#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12428#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12429
12430#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12431 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12432#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12433 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12434#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12435 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12436#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12437 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12438#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12439 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12440#define IEM_MC_IF_FCW_IM() \
12441 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12442
12443#define IEM_MC_ELSE() } else {
12444#define IEM_MC_ENDIF() } do {} while (0)
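/* Illustrative usage sketch: the IEM_MC_IF_* macros open a block which IEM_MC_ELSE
 * and IEM_MC_ENDIF close again, e.g. for a Jcc style instruction (i8Imm assumed):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */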
12445
12446/** @} */
12447
12448
12449/** @name Opcode Debug Helpers.
12450 * @{
12451 */
12452#ifdef VBOX_WITH_STATISTICS
12453# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12454#else
12455# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12456#endif
12457
12458#ifdef DEBUG
12459# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12460 do { \
12461 IEMOP_INC_STATS(a_Stats); \
12462 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12463 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12464 } while (0)
12465
12466# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12467 do { \
12468 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12469 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12470 (void)RT_CONCAT(OP_,a_Upper); \
12471 (void)(a_fDisHints); \
12472 (void)(a_fIemHints); \
12473 } while (0)
12474
12475# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12476 do { \
12477 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12478 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12479 (void)RT_CONCAT(OP_,a_Upper); \
12480 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12481 (void)(a_fDisHints); \
12482 (void)(a_fIemHints); \
12483 } while (0)
12484
12485# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12486 do { \
12487 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12488 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12489 (void)RT_CONCAT(OP_,a_Upper); \
12490 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12491 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12492 (void)(a_fDisHints); \
12493 (void)(a_fIemHints); \
12494 } while (0)
12495
12496# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12497 do { \
12498 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12499 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12500 (void)RT_CONCAT(OP_,a_Upper); \
12501 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12502 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12503 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12504 (void)(a_fDisHints); \
12505 (void)(a_fIemHints); \
12506 } while (0)
12507
12508# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12509 do { \
12510 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12511 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12512 (void)RT_CONCAT(OP_,a_Upper); \
12513 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12514 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12515 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12516 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12517 (void)(a_fDisHints); \
12518 (void)(a_fIemHints); \
12519 } while (0)
12520
12521#else
12522# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12523
12524# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12525 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12526# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12528# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12529 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12530# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12531 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12532# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12533 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12534
12535#endif
12536
12537#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12538 IEMOP_MNEMONIC0EX(a_Lower, \
12539 #a_Lower, \
12540 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12541#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12542 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12543 #a_Lower " " #a_Op1, \
12544 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12545#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12546 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12547 #a_Lower " " #a_Op1 "," #a_Op2, \
12548 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12549#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12550 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12551 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12552 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12553#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12555 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12556 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
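/* Illustrative usage sketch (the operand forms and hint flags are assumed to match
 * the instruction tables): a two operand decoder typically starts out with
 *
 *      IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * which expands to the add_Gv_Ev statistics counter and, in debug builds, the Log4
 * decode line.
 */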
12557
12558/** @} */
12559
12560
12561/** @name Opcode Helpers.
12562 * @{
12563 */
12564
12565#ifdef IN_RING3
12566# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12567 do { \
12568 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12569 else \
12570 { \
12571 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12572 return IEMOP_RAISE_INVALID_OPCODE(); \
12573 } \
12574 } while (0)
12575#else
12576# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12577 do { \
12578 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12579 else return IEMOP_RAISE_INVALID_OPCODE(); \
12580 } while (0)
12581#endif
12582
12583/** The instruction requires a 186 or later. */
12584#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12585# define IEMOP_HLP_MIN_186() do { } while (0)
12586#else
12587# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12588#endif
12589
12590/** The instruction requires a 286 or later. */
12591#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12592# define IEMOP_HLP_MIN_286() do { } while (0)
12593#else
12594# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12595#endif
12596
12597/** The instruction requires a 386 or later. */
12598#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12599# define IEMOP_HLP_MIN_386() do { } while (0)
12600#else
12601# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12602#endif
12603
12604/** The instruction requires a 386 or later if the given expression is true. */
12605#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12606# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12607#else
12608# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12609#endif
12610
12611/** The instruction requires a 486 or later. */
12612#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12613# define IEMOP_HLP_MIN_486() do { } while (0)
12614#else
12615# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12616#endif
12617
12618/** The instruction requires a Pentium (586) or later. */
12619#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12620# define IEMOP_HLP_MIN_586() do { } while (0)
12621#else
12622# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12623#endif
12624
12625/** The instruction requires a PentiumPro (686) or later. */
12626#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12627# define IEMOP_HLP_MIN_686() do { } while (0)
12628#else
12629# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12630#endif
12631
12632
12633/** The instruction raises an \#UD in real and V8086 mode. */
12634#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12635 do \
12636 { \
12637 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12638 else return IEMOP_RAISE_INVALID_OPCODE(); \
12639 } while (0)
12640
12641#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12642/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12643 * without using a 64-bit code segment (applicable to all VMX instructions
12644 * except VMCALL).
12645 */
12646#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12647 do \
12648 { \
12649 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12650 && ( !IEM_IS_LONG_MODE(pVCpu) \
12651 || IEM_IS_64BIT_CODE(pVCpu))) \
12652 { /* likely */ } \
12653 else \
12654 { \
12655 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12656 { \
12657 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12658 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12659 return IEMOP_RAISE_INVALID_OPCODE(); \
12660 } \
12661 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12662 { \
12663 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12664 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12665 return IEMOP_RAISE_INVALID_OPCODE(); \
12666 } \
12667 } \
12668 } while (0)
12669
12670/** The instruction can only be executed in VMX operation (VMX root mode and
12671 * non-root mode).
12672 *
12673 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12674 */
12675# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12676 do \
12677 { \
12678 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12679 else \
12680 { \
12681 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12682 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12683 return IEMOP_RAISE_INVALID_OPCODE(); \
12684 } \
12685 } while (0)
12686#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12687
12688/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12689 * 64-bit mode. */
12690#define IEMOP_HLP_NO_64BIT() \
12691 do \
12692 { \
12693 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12694 return IEMOP_RAISE_INVALID_OPCODE(); \
12695 } while (0)
12696
12697/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12698 * 64-bit mode. */
12699#define IEMOP_HLP_ONLY_64BIT() \
12700 do \
12701 { \
12702 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12703 return IEMOP_RAISE_INVALID_OPCODE(); \
12704 } while (0)
12705
12706/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12707#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12708 do \
12709 { \
12710 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12711 iemRecalEffOpSize64Default(pVCpu); \
12712 } while (0)
12713
12714/** The instruction has 64-bit operand size if 64-bit mode. */
12715#define IEMOP_HLP_64BIT_OP_SIZE() \
12716 do \
12717 { \
12718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12719 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12720 } while (0)
12721
12722/** Only a REX prefix immediately preceding the first opcode byte takes
12723 * effect. This macro helps ensure this, as well as logging bad guest code. */
12724#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12725 do \
12726 { \
12727 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12728 { \
12729 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12730 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12731 pVCpu->iem.s.uRexB = 0; \
12732 pVCpu->iem.s.uRexIndex = 0; \
12733 pVCpu->iem.s.uRexReg = 0; \
12734 iemRecalEffOpSize(pVCpu); \
12735 } \
12736 } while (0)
12737
12738/**
12739 * Done decoding.
12740 */
12741#define IEMOP_HLP_DONE_DECODING() \
12742 do \
12743 { \
12744 /*nothing for now, maybe later... */ \
12745 } while (0)
12746
12747/**
12748 * Done decoding, raise \#UD exception if lock prefix present.
12749 */
12750#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12751 do \
12752 { \
12753 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12754 { /* likely */ } \
12755 else \
12756 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12757 } while (0)
12758
12759
12760/**
12761 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12762 * repnz or size prefixes are present, or if in real or v8086 mode.
12763 */
12764#define IEMOP_HLP_DONE_VEX_DECODING() \
12765 do \
12766 { \
12767 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12768 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12769 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12770 { /* likely */ } \
12771 else \
12772 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12773 } while (0)
12774
12775/**
12776 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12777 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12778 */
12779#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12780 do \
12781 { \
12782 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12783 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12784 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12785 && pVCpu->iem.s.uVexLength == 0)) \
12786 { /* likely */ } \
12787 else \
12788 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12789 } while (0)
12790
12791
12792/**
12793 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12794 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12795 * register 0, or if in real or v8086 mode.
12796 */
12797#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12798 do \
12799 { \
12800 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12801 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12802 && !pVCpu->iem.s.uVex3rdReg \
12803 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12804 { /* likely */ } \
12805 else \
12806 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12807 } while (0)
12808
12809/**
12810 * Done decoding VEX, no V, L=0.
12811 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12812 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12813 */
12814#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12815 do \
12816 { \
12817 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12818 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12819 && pVCpu->iem.s.uVexLength == 0 \
12820 && pVCpu->iem.s.uVex3rdReg == 0 \
12821 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12822 { /* likely */ } \
12823 else \
12824 return IEMOP_RAISE_INVALID_OPCODE(); \
12825 } while (0)
12826
12827#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12828 do \
12829 { \
12830 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12831 { /* likely */ } \
12832 else \
12833 { \
12834 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12835 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12836 } \
12837 } while (0)
12838#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12839 do \
12840 { \
12841 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12842 { /* likely */ } \
12843 else \
12844 { \
12845 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12846 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12847 } \
12848 } while (0)
12849
12850/**
12851 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12852 * are present.
12853 */
12854#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12855 do \
12856 { \
12857 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12858 { /* likely */ } \
12859 else \
12860 return IEMOP_RAISE_INVALID_OPCODE(); \
12861 } while (0)
12862
12863/**
12864 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12865 * prefixes are present.
12866 */
12867#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12868 do \
12869 { \
12870 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12871 { /* likely */ } \
12872 else \
12873 return IEMOP_RAISE_INVALID_OPCODE(); \
12874 } while (0)
12875
12876
12877/**
12878 * Calculates the effective address of a ModR/M memory operand.
12879 *
12880 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12881 *
12882 * @return Strict VBox status code.
12883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12884 * @param bRm The ModRM byte.
12885 * @param cbImm The size of any immediate following the
12886 * effective address opcode bytes. Important for
12887 * RIP relative addressing.
12888 * @param pGCPtrEff Where to return the effective address.
12889 */
12890IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12891{
12892 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12893# define SET_SS_DEF() \
12894 do \
12895 { \
12896 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12897 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12898 } while (0)
12899
12900 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12901 {
12902/** @todo Check the effective address size crap! */
12903 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12904 {
12905 uint16_t u16EffAddr;
12906
12907 /* Handle the disp16 form with no registers first. */
12908 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12909 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12910 else
12911 {
12912                /* Get the displacement. */
12913 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12914 {
12915 case 0: u16EffAddr = 0; break;
12916 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12917 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12918 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12919 }
12920
12921 /* Add the base and index registers to the disp. */
12922 switch (bRm & X86_MODRM_RM_MASK)
12923 {
12924 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12925 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12926 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12927 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12928 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12929 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12930 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12931 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12932 }
12933 }
12934
12935 *pGCPtrEff = u16EffAddr;
12936 }
12937 else
12938 {
12939 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12940 uint32_t u32EffAddr;
12941
12942 /* Handle the disp32 form with no registers first. */
12943 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12944 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12945 else
12946 {
12947 /* Get the register (or SIB) value. */
12948 switch ((bRm & X86_MODRM_RM_MASK))
12949 {
12950 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12951 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12952 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12953 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12954 case 4: /* SIB */
12955 {
12956 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12957
12958 /* Get the index and scale it. */
12959 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12960 {
12961 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12962 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12963 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12964 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12965 case 4: u32EffAddr = 0; /*none */ break;
12966 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12967 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12968 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12969 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12970 }
12971 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12972
12973 /* add base */
12974 switch (bSib & X86_SIB_BASE_MASK)
12975 {
12976 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12977 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12978 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12979 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12980 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12981 case 5:
12982 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12983 {
12984 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12985 SET_SS_DEF();
12986 }
12987 else
12988 {
12989 uint32_t u32Disp;
12990 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12991 u32EffAddr += u32Disp;
12992 }
12993 break;
12994 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12995 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12996 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12997 }
12998 break;
12999 }
13000 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13001 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13002 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13003 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13004 }
13005
13006 /* Get and add the displacement. */
13007 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13008 {
13009 case 0:
13010 break;
13011 case 1:
13012 {
13013 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13014 u32EffAddr += i8Disp;
13015 break;
13016 }
13017 case 2:
13018 {
13019 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13020 u32EffAddr += u32Disp;
13021 break;
13022 }
13023 default:
13024 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13025 }
13026
13027 }
13028 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13029 *pGCPtrEff = u32EffAddr;
13030 else
13031 {
13032 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13033 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13034 }
13035 }
13036 }
13037 else
13038 {
13039 uint64_t u64EffAddr;
13040
13041 /* Handle the rip+disp32 form with no registers first. */
13042 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13043 {
13044 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13045 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13046 }
13047 else
13048 {
13049 /* Get the register (or SIB) value. */
13050 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13051 {
13052 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13053 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13054 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13055 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13056 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13057 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13058 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13059 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13060 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13061 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13062 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13063 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13064 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13065 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13066 /* SIB */
13067 case 4:
13068 case 12:
13069 {
13070 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13071
13072 /* Get the index and scale it. */
13073 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13074 {
13075 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13076 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13077 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13078 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13079 case 4: u64EffAddr = 0; /*none */ break;
13080 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13081 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13082 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13083 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13084 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13085 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13086 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13087 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13088 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13089 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13090 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13091 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13092 }
13093 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13094
13095 /* add base */
13096 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13097 {
13098 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13099 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13100 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13101 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13102 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13103 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13104 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13105 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13106 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13107 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13108 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13109 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13110 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13111 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13112 /* complicated encodings */
13113 case 5:
13114 case 13:
13115 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13116 {
13117 if (!pVCpu->iem.s.uRexB)
13118 {
13119 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13120 SET_SS_DEF();
13121 }
13122 else
13123 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13124 }
13125 else
13126 {
13127 uint32_t u32Disp;
13128 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13129 u64EffAddr += (int32_t)u32Disp;
13130 }
13131 break;
13132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13133 }
13134 break;
13135 }
13136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13137 }
13138
13139 /* Get and add the displacement. */
13140 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13141 {
13142 case 0:
13143 break;
13144 case 1:
13145 {
13146 int8_t i8Disp;
13147 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13148 u64EffAddr += i8Disp;
13149 break;
13150 }
13151 case 2:
13152 {
13153 uint32_t u32Disp;
13154 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13155 u64EffAddr += (int32_t)u32Disp;
13156 break;
13157 }
13158 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13159 }
13160
13161 }
13162
13163 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13164 *pGCPtrEff = u64EffAddr;
13165 else
13166 {
13167 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13168 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13169 }
13170 }
13171
13172 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13173 return VINF_SUCCESS;
13174}
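
/* Worked example for the rip+disp32 branch above: IEM_GET_INSTR_LEN() covers only the opcode
 * bytes fetched so far, so cbImm supplies the size of any immediate that still follows; the sum
 * is the address of the next instruction, which RIP-relative operands are defined against.
 * For a 7-byte "lea rax, [rip+0x1000]" (48 8D 05 00 10 00 00) fetched at 0x401000 and cbImm=0,
 * EffAddr = 0x401000 + 7 + 0x1000 = 0x402007; an instruction with a trailing imm32 would pass
 * cbImm=4 to account for the not-yet-fetched immediate. */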
13175
13176
13177/**
13178 * Calculates the effective address of a ModR/M memory operand.
13179 *
13180 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13181 *
13182 * @return Strict VBox status code.
13183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13184 * @param bRm The ModRM byte.
13185 * @param cbImm The size of any immediate following the
13186 * effective address opcode bytes. Important for
13187 * RIP relative addressing.
13188 * @param pGCPtrEff Where to return the effective address.
13189 * @param offRsp RSP displacement.
13190 */
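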
13191IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13192{
13193    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13194# define SET_SS_DEF() \
13195 do \
13196 { \
13197 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13198 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13199 } while (0)
13200
13201 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13202 {
13203/** @todo Check the effective address size crap! */
13204 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13205 {
13206 uint16_t u16EffAddr;
13207
13208 /* Handle the disp16 form with no registers first. */
13209 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13210 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13211 else
13212 {
13213                /* Get the displacement. */
13214 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13215 {
13216 case 0: u16EffAddr = 0; break;
13217 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13218 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13219 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13220 }
13221
13222 /* Add the base and index registers to the disp. */
13223 switch (bRm & X86_MODRM_RM_MASK)
13224 {
13225 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13226 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13227 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13228 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13229 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13230 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13231 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13232 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13233 }
13234 }
13235
13236 *pGCPtrEff = u16EffAddr;
13237 }
13238 else
13239 {
13240 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13241 uint32_t u32EffAddr;
13242
13243 /* Handle the disp32 form with no registers first. */
13244 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13245 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13246 else
13247 {
13248 /* Get the register (or SIB) value. */
13249 switch ((bRm & X86_MODRM_RM_MASK))
13250 {
13251 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13252 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13253 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13254 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13255 case 4: /* SIB */
13256 {
13257 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13258
13259 /* Get the index and scale it. */
13260 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13261 {
13262 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13263 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13264 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13265 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13266 case 4: u32EffAddr = 0; /*none */ break;
13267 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13268 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13269 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13270 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13271 }
13272 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13273
13274 /* add base */
13275 switch (bSib & X86_SIB_BASE_MASK)
13276 {
13277 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13278 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13279 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13280 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13281 case 4:
13282 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13283 SET_SS_DEF();
13284 break;
13285 case 5:
13286 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13287 {
13288 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13289 SET_SS_DEF();
13290 }
13291 else
13292 {
13293 uint32_t u32Disp;
13294 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13295 u32EffAddr += u32Disp;
13296 }
13297 break;
13298 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13299 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13300 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13301 }
13302 break;
13303 }
13304 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13305 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13306 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13307 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13308 }
13309
13310 /* Get and add the displacement. */
13311 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13312 {
13313 case 0:
13314 break;
13315 case 1:
13316 {
13317 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13318 u32EffAddr += i8Disp;
13319 break;
13320 }
13321 case 2:
13322 {
13323 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13324 u32EffAddr += u32Disp;
13325 break;
13326 }
13327 default:
13328 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13329 }
13330
13331 }
13332 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13333 *pGCPtrEff = u32EffAddr;
13334 else
13335 {
13336 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13337 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13338 }
13339 }
13340 }
13341 else
13342 {
13343 uint64_t u64EffAddr;
13344
13345 /* Handle the rip+disp32 form with no registers first. */
13346 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13347 {
13348 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13349 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13350 }
13351 else
13352 {
13353 /* Get the register (or SIB) value. */
13354 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13355 {
13356 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13357 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13358 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13359 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13360 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13361 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13362 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13363 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13364 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13365 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13366 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13367 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13368 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13369 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13370 /* SIB */
13371 case 4:
13372 case 12:
13373 {
13374 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13375
13376 /* Get the index and scale it. */
13377 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13378 {
13379 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13380 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13381 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13382 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13383 case 4: u64EffAddr = 0; /*none */ break;
13384 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13385 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13386 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13387 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13388 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13389 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13390 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13391 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13392 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13393 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13394 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13395 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13396 }
13397 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13398
13399 /* add base */
13400 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13401 {
13402 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13403 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13404 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13405 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13406 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13407 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13408 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13409 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13410 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13411 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13412 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13413 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13414 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13415 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13416 /* complicated encodings */
13417 case 5:
13418 case 13:
13419 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13420 {
13421 if (!pVCpu->iem.s.uRexB)
13422 {
13423 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13424 SET_SS_DEF();
13425 }
13426 else
13427 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13428 }
13429 else
13430 {
13431 uint32_t u32Disp;
13432 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13433 u64EffAddr += (int32_t)u32Disp;
13434 }
13435 break;
13436 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13437 }
13438 break;
13439 }
13440 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13441 }
13442
13443 /* Get and add the displacement. */
13444 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13445 {
13446 case 0:
13447 break;
13448 case 1:
13449 {
13450 int8_t i8Disp;
13451 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13452 u64EffAddr += i8Disp;
13453 break;
13454 }
13455 case 2:
13456 {
13457 uint32_t u32Disp;
13458 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13459 u64EffAddr += (int32_t)u32Disp;
13460 break;
13461 }
13462 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13463 }
13464
13465 }
13466
13467 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13468 *pGCPtrEff = u64EffAddr;
13469 else
13470 {
13471 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13472 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13473 }
13474 }
13475
13476    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13477 return VINF_SUCCESS;
13478}
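
/* Illustrative sketch (not used by IEM): the SIB handling above reduces to
 * EffAddr = base + (index << scale) + displacement. The helper below, disabled with #if 0,
 * restates that arithmetic on plain integers; its name and parameters are made up purely for
 * illustration. */
#if 0
static uint64_t iemCalcSibEffAddrExample(uint64_t uBase, uint64_t uIndex, uint8_t bSib, int32_t i32Disp)
{
    uint64_t uEffAddr = uIndex << ((bSib >> 6) & 3); /* SIB bits 7:6 give the scale as a shift count (0..3). */
    uEffAddr += uBase;                               /* Base register selected by SIB bits 2:0 (+ REX.B).    */
    uEffAddr += (int64_t)i32Disp;                    /* Sign-extended disp8/disp32, if any.                  */
    return uEffAddr;
}
/* E.g. ModRM=0x44, SIB=0x8D, disp8=0x10 decodes to mod=01, rm=100 (SIB), scale=x4, index=rcx,
 * base=rbp, so the switches above produce rbp + rcx*4 + 0x10. */
#endif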
13479
13480
13481#ifdef IEM_WITH_SETJMP
13482/**
13483 * Calculates the effective address of a ModR/M memory operand.
13484 *
13485 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13486 *
13487 * May longjmp on internal error.
13488 *
13489 * @return The effective address.
13490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13491 * @param bRm The ModRM byte.
13492 * @param cbImm The size of any immediate following the
13493 * effective address opcode bytes. Important for
13494 * RIP relative addressing.
13495 */
13496IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13497{
13498 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13499# define SET_SS_DEF() \
13500 do \
13501 { \
13502 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13503 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13504 } while (0)
13505
13506 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13507 {
13508/** @todo Check the effective address size crap! */
13509 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13510 {
13511 uint16_t u16EffAddr;
13512
13513 /* Handle the disp16 form with no registers first. */
13514 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13515 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13516 else
13517 {
13518                /* Get the displacement. */
13519 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13520 {
13521 case 0: u16EffAddr = 0; break;
13522 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13523 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13524 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13525 }
13526
13527 /* Add the base and index registers to the disp. */
13528 switch (bRm & X86_MODRM_RM_MASK)
13529 {
13530 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13531 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13532 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13533 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13534 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13535 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13536 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13537 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13538 }
13539 }
13540
13541 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13542 return u16EffAddr;
13543 }
13544
13545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13546 uint32_t u32EffAddr;
13547
13548 /* Handle the disp32 form with no registers first. */
13549 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13550 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13551 else
13552 {
13553 /* Get the register (or SIB) value. */
13554 switch ((bRm & X86_MODRM_RM_MASK))
13555 {
13556 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13557 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13558 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13559 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13560 case 4: /* SIB */
13561 {
13562 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13563
13564 /* Get the index and scale it. */
13565 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13566 {
13567 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13568 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13569 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13570 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13571 case 4: u32EffAddr = 0; /*none */ break;
13572 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13573 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13574 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13575 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13576 }
13577 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13578
13579 /* add base */
13580 switch (bSib & X86_SIB_BASE_MASK)
13581 {
13582 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13583 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13584 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13585 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13586 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13587 case 5:
13588 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13589 {
13590 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13591 SET_SS_DEF();
13592 }
13593 else
13594 {
13595 uint32_t u32Disp;
13596 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13597 u32EffAddr += u32Disp;
13598 }
13599 break;
13600 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13601 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13602 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13603 }
13604 break;
13605 }
13606 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13607 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13608 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13609 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13610 }
13611
13612 /* Get and add the displacement. */
13613 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13614 {
13615 case 0:
13616 break;
13617 case 1:
13618 {
13619 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13620 u32EffAddr += i8Disp;
13621 break;
13622 }
13623 case 2:
13624 {
13625 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13626 u32EffAddr += u32Disp;
13627 break;
13628 }
13629 default:
13630 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13631 }
13632 }
13633
13634 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13635 {
13636 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13637 return u32EffAddr;
13638 }
13639 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13640 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13641 return u32EffAddr & UINT16_MAX;
13642 }
13643
13644 uint64_t u64EffAddr;
13645
13646 /* Handle the rip+disp32 form with no registers first. */
13647 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13648 {
13649 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13650 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13651 }
13652 else
13653 {
13654 /* Get the register (or SIB) value. */
13655 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13656 {
13657 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13658 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13659 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13660 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13661 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13662 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13663 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13664 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13665 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13666 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13667 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13668 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13669 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13670 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13671 /* SIB */
13672 case 4:
13673 case 12:
13674 {
13675 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13676
13677 /* Get the index and scale it. */
13678 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13679 {
13680 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13681 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13682 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13683 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13684 case 4: u64EffAddr = 0; /*none */ break;
13685 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13686 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13687 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13688 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13689 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13690 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13691 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13692 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13693 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13694 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13695 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13696 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13697 }
13698 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13699
13700 /* add base */
13701 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13702 {
13703 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13704 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13705 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13706 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13707 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13708 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13709 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13710 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13711 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13712 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13713 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13714 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13715 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13716 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13717 /* complicated encodings */
13718 case 5:
13719 case 13:
13720 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13721 {
13722 if (!pVCpu->iem.s.uRexB)
13723 {
13724 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13725 SET_SS_DEF();
13726 }
13727 else
13728 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13729 }
13730 else
13731 {
13732 uint32_t u32Disp;
13733 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13734 u64EffAddr += (int32_t)u32Disp;
13735 }
13736 break;
13737 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13738 }
13739 break;
13740 }
13741 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13742 }
13743
13744 /* Get and add the displacement. */
13745 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13746 {
13747 case 0:
13748 break;
13749 case 1:
13750 {
13751 int8_t i8Disp;
13752 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13753 u64EffAddr += i8Disp;
13754 break;
13755 }
13756 case 2:
13757 {
13758 uint32_t u32Disp;
13759 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13760 u64EffAddr += (int32_t)u32Disp;
13761 break;
13762 }
13763 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13764 }
13765
13766 }
13767
13768 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13769 {
13770 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13771 return u64EffAddr;
13772 }
13773 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13774 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13775 return u64EffAddr & UINT32_MAX;
13776}
13777#endif /* IEM_WITH_SETJMP */
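
/* Note: the setjmp variant above reports internal decode errors either by returning RTGCPTR_MAX
 * (the IEM_NOT_REACHED_DEFAULT_CASE_RET2 cases) or by longjmp'ing on the per-CPU jump buffer
 * (the AssertFailedStmt cases), instead of threading a VBOXSTRICTRC back to the caller the way
 * iemOpHlpCalcRmEffAddrEx does; the jump buffer is armed by the setjmp() calls in
 * iemExecOneInner, IEMExecLots and IEMExecForExits further down. */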
13778
13779/** @} */
13780
13781
13782
13783/*
13784 * Include the instructions
13785 */
13786#include "IEMAllInstructions.cpp.h"
13787
13788
13789
13790#ifdef LOG_ENABLED
13791/**
13792 * Logs the current instruction.
13793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13794 * @param fSameCtx Set if we have the same context information as the VMM,
13795 * clear if we may have already executed an instruction in
13796 * our debug context. When clear, we assume IEMCPU holds
13797 * valid CPU mode info.
13798 *
13799 * The @a fSameCtx parameter is now misleading and obsolete.
13800 * @param pszFunction The IEM function doing the execution.
13801 */
13802IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13803{
13804# ifdef IN_RING3
13805 if (LogIs2Enabled())
13806 {
13807 char szInstr[256];
13808 uint32_t cbInstr = 0;
13809 if (fSameCtx)
13810 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13811 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13812 szInstr, sizeof(szInstr), &cbInstr);
13813 else
13814 {
13815 uint32_t fFlags = 0;
13816 switch (pVCpu->iem.s.enmCpuMode)
13817 {
13818 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13819 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13820 case IEMMODE_16BIT:
13821 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13822 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13823 else
13824 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13825 break;
13826 }
13827 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13828 szInstr, sizeof(szInstr), &cbInstr);
13829 }
13830
13831 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13832 Log2(("**** %s\n"
13833 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13834 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13835 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13836 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13837 " %s\n"
13838 , pszFunction,
13839 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13840 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13841 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13842 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13843 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13844 szInstr));
13845
13846 if (LogIs3Enabled())
13847 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13848 }
13849 else
13850# endif
13851 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13852 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13853 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13854}
13855#endif /* LOG_ENABLED */
13856
13857
13858/**
13859 * Makes status code adjustments (pass up from I/O and access handlers)
13860 * as well as maintaining statistics.
13861 *
13862 * @returns Strict VBox status code to pass up.
13863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13864 * @param rcStrict The status from executing an instruction.
13865 */
13866DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13867{
13868 if (rcStrict != VINF_SUCCESS)
13869 {
13870 if (RT_SUCCESS(rcStrict))
13871 {
13872 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13873 || rcStrict == VINF_IOM_R3_IOPORT_READ
13874 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13875 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13876 || rcStrict == VINF_IOM_R3_MMIO_READ
13877 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13878 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13879 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13880 || rcStrict == VINF_CPUM_R3_MSR_READ
13881 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13882 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13883 || rcStrict == VINF_EM_RAW_TO_R3
13884 || rcStrict == VINF_EM_TRIPLE_FAULT
13885 || rcStrict == VINF_GIM_R3_HYPERCALL
13886 /* raw-mode / virt handlers only: */
13887 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13888 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13889 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13890 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13891 || rcStrict == VINF_SELM_SYNC_GDT
13892 || rcStrict == VINF_CSAM_PENDING_ACTION
13893 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13894 /* nested hw.virt codes: */
13895 || rcStrict == VINF_VMX_VMEXIT
13896 || rcStrict == VINF_SVM_VMEXIT
13897 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13898/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13899 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13900#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13901 if ( rcStrict == VINF_VMX_VMEXIT
13902 && rcPassUp == VINF_SUCCESS)
13903 rcStrict = VINF_SUCCESS;
13904 else
13905#endif
13906#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13907 if ( rcStrict == VINF_SVM_VMEXIT
13908 && rcPassUp == VINF_SUCCESS)
13909 rcStrict = VINF_SUCCESS;
13910 else
13911#endif
13912 if (rcPassUp == VINF_SUCCESS)
13913 pVCpu->iem.s.cRetInfStatuses++;
13914 else if ( rcPassUp < VINF_EM_FIRST
13915 || rcPassUp > VINF_EM_LAST
13916 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13917 {
13918 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13919 pVCpu->iem.s.cRetPassUpStatus++;
13920 rcStrict = rcPassUp;
13921 }
13922 else
13923 {
13924 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13925 pVCpu->iem.s.cRetInfStatuses++;
13926 }
13927 }
13928 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13929 pVCpu->iem.s.cRetAspectNotImplemented++;
13930 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13931 pVCpu->iem.s.cRetInstrNotImplemented++;
13932 else
13933 pVCpu->iem.s.cRetErrStatuses++;
13934 }
13935 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13936 {
13937 pVCpu->iem.s.cRetPassUpStatus++;
13938 rcStrict = pVCpu->iem.s.rcPassUp;
13939 }
13940
13941 return rcStrict;
13942}
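
/* Note: the pass-up rule above prefers pVCpu->iem.s.rcPassUp whenever it lies outside the
 * VINF_EM_FIRST..VINF_EM_LAST range or is numerically smaller than (i.e. takes precedence over)
 * the informational status the instruction produced; otherwise the instruction's own status is
 * kept and the pending pass-up is only counted in the statistics. */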
13943
13944
13945/**
13946 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13947 * IEMExecOneWithPrefetchedByPC.
13948 *
13949 * Similar code is found in IEMExecLots.
13950 *
13951 * @return Strict VBox status code.
13952 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13953 * @param fExecuteInhibit If set, execute the instruction following CLI,
13954 * POP SS and MOV SS,GR.
13955 * @param pszFunction The calling function name.
13956 */
13957DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13958{
13959 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13960 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13961 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13962 RT_NOREF_PV(pszFunction);
13963
13964#ifdef IEM_WITH_SETJMP
13965 VBOXSTRICTRC rcStrict;
13966 jmp_buf JmpBuf;
13967 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13968 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13969 if ((rcStrict = setjmp(JmpBuf)) == 0)
13970 {
13971 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13972 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13973 }
13974 else
13975 pVCpu->iem.s.cLongJumps++;
13976 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13977#else
13978 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13979 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13980#endif
13981 if (rcStrict == VINF_SUCCESS)
13982 pVCpu->iem.s.cInstructions++;
13983 if (pVCpu->iem.s.cActiveMappings > 0)
13984 {
13985 Assert(rcStrict != VINF_SUCCESS);
13986 iemMemRollback(pVCpu);
13987 }
13988 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13989 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13990 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13991
13992//#ifdef DEBUG
13993// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13994//#endif
13995
13996 /* Execute the next instruction as well if a cli, pop ss or
13997 mov ss, Gr has just completed successfully. */
13998 if ( fExecuteInhibit
13999 && rcStrict == VINF_SUCCESS
14000 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14001 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14002 {
14003 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14004 if (rcStrict == VINF_SUCCESS)
14005 {
14006#ifdef LOG_ENABLED
14007 iemLogCurInstr(pVCpu, false, pszFunction);
14008#endif
14009#ifdef IEM_WITH_SETJMP
14010 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14011 if ((rcStrict = setjmp(JmpBuf)) == 0)
14012 {
14013 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14014 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14015 }
14016 else
14017 pVCpu->iem.s.cLongJumps++;
14018 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14019#else
14020 IEM_OPCODE_GET_NEXT_U8(&b);
14021 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14022#endif
14023 if (rcStrict == VINF_SUCCESS)
14024 pVCpu->iem.s.cInstructions++;
14025 if (pVCpu->iem.s.cActiveMappings > 0)
14026 {
14027 Assert(rcStrict != VINF_SUCCESS);
14028 iemMemRollback(pVCpu);
14029 }
14030 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14031 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14032 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14033 }
14034 else if (pVCpu->iem.s.cActiveMappings > 0)
14035 iemMemRollback(pVCpu);
14036 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14037 }
14038
14039 /*
14040 * Return value fiddling, statistics and sanity assertions.
14041 */
14042 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14043
14044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14045 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14046 return rcStrict;
14047}
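
/* Note: the fExecuteInhibit path above exists because x86 delays interrupt delivery for one
 * instruction after MOV SS and POP SS (and after STI's interrupt shadow). A guest stack switch
 * such as
 *     mov ss, ax
 *     mov esp, 0x8000
 * must complete both instructions before an external interrupt may be taken, so the helper
 * decodes and runs the shadowed instruction immediately instead of returning to the caller in
 * between. */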
14048
14049
14050#ifdef IN_RC
14051/**
14052 * Re-enters raw-mode or ensures we return to ring-3.
14053 *
14054 * @returns rcStrict, maybe modified.
14055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14056 * @param rcStrict The status code returned by the interpreter.
14057 */
14058DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14059{
14060 if ( !pVCpu->iem.s.fInPatchCode
14061 && ( rcStrict == VINF_SUCCESS
14062 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14063 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14064 {
14065 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14066 CPUMRawEnter(pVCpu);
14067 else
14068 {
14069 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14070 rcStrict = VINF_EM_RESCHEDULE;
14071 }
14072 }
14073 return rcStrict;
14074}
14075#endif
14076
14077
14078/**
14079 * Execute one instruction.
14080 *
14081 * @return Strict VBox status code.
14082 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14083 */
14084VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14085{
14086#ifdef LOG_ENABLED
14087 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14088#endif
14089
14090 /*
14091 * Do the decoding and emulation.
14092 */
14093 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14094 if (rcStrict == VINF_SUCCESS)
14095 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14096 else if (pVCpu->iem.s.cActiveMappings > 0)
14097 iemMemRollback(pVCpu);
14098
14099#ifdef IN_RC
14100 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14101#endif
14102 if (rcStrict != VINF_SUCCESS)
14103 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14104 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14105 return rcStrict;
14106}
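
/* Illustrative sketch (not used by IEM): a minimal caller loop built only on the IEMExecOne
 * contract documented above. The helper name and the instruction cap are hypothetical; any
 * non-VINF_SUCCESS strict status is handed straight back to the VM execution loop. */
#if 0
static VBOXSTRICTRC iemExampleExecSome(PVMCPU pVCpu, uint32_t cMaxInstrs)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstrs-- > 0)
    {
        rcStrict = IEMExecOne(pVCpu);   /* Decode and emulate exactly one guest instruction. */
        if (rcStrict != VINF_SUCCESS)   /* Includes informational I/O and VM-exit statuses.  */
            break;
    }
    return rcStrict;
}
#endif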
14107
14108
14109VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14110{
14111 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14112
14113 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14114 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14115 if (rcStrict == VINF_SUCCESS)
14116 {
14117 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14118 if (pcbWritten)
14119 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14120 }
14121 else if (pVCpu->iem.s.cActiveMappings > 0)
14122 iemMemRollback(pVCpu);
14123
14124#ifdef IN_RC
14125 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14126#endif
14127 return rcStrict;
14128}
14129
14130
14131VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14132 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14133{
14134 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14135
14136 VBOXSTRICTRC rcStrict;
14137 if ( cbOpcodeBytes
14138 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14139 {
14140 iemInitDecoder(pVCpu, false);
14141#ifdef IEM_WITH_CODE_TLB
14142 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14143 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14144 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14145 pVCpu->iem.s.offCurInstrStart = 0;
14146 pVCpu->iem.s.offInstrNextByte = 0;
14147#else
14148 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14149 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14150#endif
14151 rcStrict = VINF_SUCCESS;
14152 }
14153 else
14154 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14155 if (rcStrict == VINF_SUCCESS)
14156 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14157 else if (pVCpu->iem.s.cActiveMappings > 0)
14158 iemMemRollback(pVCpu);
14159
14160#ifdef IN_RC
14161 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14162#endif
14163 return rcStrict;
14164}
14165
14166
14167VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14168{
14169 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14170
14171 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14172 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14173 if (rcStrict == VINF_SUCCESS)
14174 {
14175 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14176 if (pcbWritten)
14177 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14178 }
14179 else if (pVCpu->iem.s.cActiveMappings > 0)
14180 iemMemRollback(pVCpu);
14181
14182#ifdef IN_RC
14183 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14184#endif
14185 return rcStrict;
14186}
14187
14188
14189VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14190 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14191{
14192 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14193
14194 VBOXSTRICTRC rcStrict;
14195 if ( cbOpcodeBytes
14196 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14197 {
14198 iemInitDecoder(pVCpu, true);
14199#ifdef IEM_WITH_CODE_TLB
14200 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14201 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14202 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14203 pVCpu->iem.s.offCurInstrStart = 0;
14204 pVCpu->iem.s.offInstrNextByte = 0;
14205#else
14206 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14207 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14208#endif
14209 rcStrict = VINF_SUCCESS;
14210 }
14211 else
14212 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14213 if (rcStrict == VINF_SUCCESS)
14214 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14215 else if (pVCpu->iem.s.cActiveMappings > 0)
14216 iemMemRollback(pVCpu);
14217
14218#ifdef IN_RC
14219 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14220#endif
14221 return rcStrict;
14222}
14223
14224
14225/**
14226 * For debugging DISGetParamSize; may come in handy.
14227 *
14228 * @returns Strict VBox status code.
14229 * @param pVCpu The cross context virtual CPU structure of the
14230 * calling EMT.
14231 * @param pCtxCore The context core structure.
14232 * @param OpcodeBytesPC The PC of the opcode bytes.
14233 * @param pvOpcodeBytes Prefetched opcode bytes.
14234 * @param cbOpcodeBytes Number of prefetched bytes.
14235 * @param pcbWritten Where to return the number of bytes written.
14236 * Optional.
14237 */
14238VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14239 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14240 uint32_t *pcbWritten)
14241{
14242 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14243
14244 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14245 VBOXSTRICTRC rcStrict;
14246 if ( cbOpcodeBytes
14247 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14248 {
14249 iemInitDecoder(pVCpu, true);
14250#ifdef IEM_WITH_CODE_TLB
14251 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14252 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14253 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14254 pVCpu->iem.s.offCurInstrStart = 0;
14255 pVCpu->iem.s.offInstrNextByte = 0;
14256#else
14257 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14258 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14259#endif
14260 rcStrict = VINF_SUCCESS;
14261 }
14262 else
14263 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14264 if (rcStrict == VINF_SUCCESS)
14265 {
14266 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14267 if (pcbWritten)
14268 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14269 }
14270 else if (pVCpu->iem.s.cActiveMappings > 0)
14271 iemMemRollback(pVCpu);
14272
14273#ifdef IN_RC
14274 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14275#endif
14276 return rcStrict;
14277}
14278
14279
14280VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14281{
14282 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14283
14284 /*
14285 * See if there is an interrupt pending in TRPM, inject it if we can.
14286 */
14287 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14288#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14289 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14290 if (fIntrEnabled)
14291 {
14292 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14293 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14294 else
14295 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14296 }
14297#else
14298 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14299#endif
14300 if ( fIntrEnabled
14301 && TRPMHasTrap(pVCpu)
14302 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14303 {
14304 uint8_t u8TrapNo;
14305 TRPMEVENT enmType;
14306 RTGCUINT uErrCode;
14307 RTGCPTR uCr2;
14308 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14309 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14310 TRPMResetTrap(pVCpu);
14311 }
14312
14313 /*
14314 * Initial decoder init w/ prefetch, then setup setjmp.
14315 */
14316 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14317 if (rcStrict == VINF_SUCCESS)
14318 {
14319#ifdef IEM_WITH_SETJMP
14320 jmp_buf JmpBuf;
14321 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14322 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14323 pVCpu->iem.s.cActiveMappings = 0;
14324 if ((rcStrict = setjmp(JmpBuf)) == 0)
14325#endif
14326 {
14327 /*
14328 * The run loop. We limit ourselves to 4096 instructions right now.
14329 */
14330 PVM pVM = pVCpu->CTX_SUFF(pVM);
14331 uint32_t cInstr = 4096;
14332 for (;;)
14333 {
14334 /*
14335 * Log the state.
14336 */
14337#ifdef LOG_ENABLED
14338 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14339#endif
14340
14341 /*
14342 * Do the decoding and emulation.
14343 */
14344 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14345 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14346 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14347 {
14348 Assert(pVCpu->iem.s.cActiveMappings == 0);
14349 pVCpu->iem.s.cInstructions++;
14350 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14351 {
14352 uint64_t fCpu = pVCpu->fLocalForcedActions
14353 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14354 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14355 | VMCPU_FF_TLB_FLUSH
14356#ifdef VBOX_WITH_RAW_MODE
14357 | VMCPU_FF_TRPM_SYNC_IDT
14358 | VMCPU_FF_SELM_SYNC_TSS
14359 | VMCPU_FF_SELM_SYNC_GDT
14360 | VMCPU_FF_SELM_SYNC_LDT
14361#endif
14362 | VMCPU_FF_INHIBIT_INTERRUPTS
14363 | VMCPU_FF_BLOCK_NMIS
14364 | VMCPU_FF_UNHALT ));
14365
14366 if (RT_LIKELY( ( !fCpu
14367 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14368 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14369 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14370 {
14371 if (cInstr-- > 0)
14372 {
14373 Assert(pVCpu->iem.s.cActiveMappings == 0);
14374 iemReInitDecoder(pVCpu);
14375 continue;
14376 }
14377 }
14378 }
14379 Assert(pVCpu->iem.s.cActiveMappings == 0);
14380 }
14381 else if (pVCpu->iem.s.cActiveMappings > 0)
14382 iemMemRollback(pVCpu);
14383 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14384 break;
14385 }
14386 }
14387#ifdef IEM_WITH_SETJMP
14388 else
14389 {
14390 if (pVCpu->iem.s.cActiveMappings > 0)
14391 iemMemRollback(pVCpu);
14392 pVCpu->iem.s.cLongJumps++;
14393 }
14394 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14395#endif
14396
14397 /*
14398 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14399 */
14400 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14401 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14402 }
14403 else
14404 {
14405 if (pVCpu->iem.s.cActiveMappings > 0)
14406 iemMemRollback(pVCpu);
14407
14408#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14409 /*
14410 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14411 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14412 */
14413 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14414#endif
14415 }
14416
14417 /*
14418 * Maybe re-enter raw-mode and log.
14419 */
14420#ifdef IN_RC
14421 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14422#endif
14423 if (rcStrict != VINF_SUCCESS)
14424 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14425 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14426 if (pcInstructions)
14427 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14428 return rcStrict;
14429}
14430
14431
14432/**
14433 * Interface used by EMExecuteExec, does exit statistics and limits.
14434 *
14435 * @returns Strict VBox status code.
14436 * @param pVCpu The cross context virtual CPU structure.
14437 * @param fWillExit To be defined.
14438 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14439 * @param cMaxInstructions Maximum number of instructions to execute.
14440 * @param cMaxInstructionsWithoutExits
14441 * The max number of instructions without exits.
14442 * @param pStats Where to return statistics.
14443 */
14444VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14445 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14446{
14447 NOREF(fWillExit); /** @todo define flexible exit crits */
14448
14449 /*
14450 * Initialize return stats.
14451 */
14452 pStats->cInstructions = 0;
14453 pStats->cExits = 0;
14454 pStats->cMaxExitDistance = 0;
14455 pStats->cReserved = 0;
14456
14457 /*
14458 * Initial decoder init w/ prefetch, then setup setjmp.
14459 */
14460 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14461 if (rcStrict == VINF_SUCCESS)
14462 {
14463#ifdef IEM_WITH_SETJMP
14464 jmp_buf JmpBuf;
14465 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14466 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14467 pVCpu->iem.s.cActiveMappings = 0;
14468 if ((rcStrict = setjmp(JmpBuf)) == 0)
14469#endif
14470 {
14471#ifdef IN_RING0
14472 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14473#endif
14474 uint32_t cInstructionSinceLastExit = 0;
14475
14476 /*
14477 * The run loop. We limit ourselves to 4096 instructions right now.
14478 */
14479 PVM pVM = pVCpu->CTX_SUFF(pVM);
14480 for (;;)
14481 {
14482 /*
14483 * Log the state.
14484 */
14485#ifdef LOG_ENABLED
14486 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14487#endif
14488
14489 /*
14490 * Do the decoding and emulation.
14491 */
14492 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14493
14494 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14495 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14496
14497 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14498 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14499 {
14500 pStats->cExits += 1;
14501 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14502 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14503 cInstructionSinceLastExit = 0;
14504 }
14505
14506 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14507 {
14508 Assert(pVCpu->iem.s.cActiveMappings == 0);
14509 pVCpu->iem.s.cInstructions++;
14510 pStats->cInstructions++;
14511 cInstructionSinceLastExit++;
14512 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14513 {
14514 uint64_t fCpu = pVCpu->fLocalForcedActions
14515 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14516 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14517 | VMCPU_FF_TLB_FLUSH
14518#ifdef VBOX_WITH_RAW_MODE
14519 | VMCPU_FF_TRPM_SYNC_IDT
14520 | VMCPU_FF_SELM_SYNC_TSS
14521 | VMCPU_FF_SELM_SYNC_GDT
14522 | VMCPU_FF_SELM_SYNC_LDT
14523#endif
14524 | VMCPU_FF_INHIBIT_INTERRUPTS
14525 | VMCPU_FF_BLOCK_NMIS
14526 | VMCPU_FF_UNHALT ));
14527
14528 if (RT_LIKELY( ( ( !fCpu
14529 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14530 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14531 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14532 || pStats->cInstructions < cMinInstructions))
14533 {
14534 if (pStats->cInstructions < cMaxInstructions)
14535 {
14536 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14537 {
14538#ifdef IN_RING0
14539 if ( !fCheckPreemptionPending
14540 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14541#endif
14542 {
14543 Assert(pVCpu->iem.s.cActiveMappings == 0);
14544 iemReInitDecoder(pVCpu);
14545 continue;
14546 }
14547#ifdef IN_RING0
14548 rcStrict = VINF_EM_RAW_INTERRUPT;
14549 break;
14550#endif
14551 }
14552 }
14553 }
14554 Assert(!(fCpu & VMCPU_FF_IEM));
14555 }
14556 Assert(pVCpu->iem.s.cActiveMappings == 0);
14557 }
14558 else if (pVCpu->iem.s.cActiveMappings > 0)
14559 iemMemRollback(pVCpu);
14560 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14561 break;
14562 }
14563 }
14564#ifdef IEM_WITH_SETJMP
14565 else
14566 {
14567 if (pVCpu->iem.s.cActiveMappings > 0)
14568 iemMemRollback(pVCpu);
14569 pVCpu->iem.s.cLongJumps++;
14570 }
14571 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14572#endif
14573
14574 /*
14575 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14576 */
14577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14578 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14579 }
14580 else
14581 {
14582 if (pVCpu->iem.s.cActiveMappings > 0)
14583 iemMemRollback(pVCpu);
14584
14585#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14586 /*
14587 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14588 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14589 */
14590 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14591#endif
14592 }
14593
14594 /*
14595 * Maybe re-enter raw-mode and log.
14596 */
14597#ifdef IN_RC
14598 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14599#endif
14600 if (rcStrict != VINF_SUCCESS)
14601 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14602 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14603 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14604 return rcStrict;
14605}
14606
14607
14608/**
14609 * Injects a trap, fault, abort, software interrupt or external interrupt.
14610 *
14611 * The parameter list matches TRPMQueryTrapAll pretty closely.
14612 *
14613 * @returns Strict VBox status code.
14614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14615 * @param u8TrapNo The trap number.
14616 * @param enmType What type is it (trap/fault/abort), software
14617 * interrupt or hardware interrupt.
14618 * @param uErrCode The error code if applicable.
14619 * @param uCr2 The CR2 value if applicable.
14620 * @param cbInstr The instruction length (only relevant for
14621 * software interrupts).
14622 */
14623VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14624 uint8_t cbInstr)
14625{
14626 iemInitDecoder(pVCpu, false);
14627#ifdef DBGFTRACE_ENABLED
14628 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14629 u8TrapNo, enmType, uErrCode, uCr2);
14630#endif
14631
14632 uint32_t fFlags;
14633 switch (enmType)
14634 {
14635 case TRPM_HARDWARE_INT:
14636 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14637 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14638 uErrCode = uCr2 = 0;
14639 break;
14640
14641 case TRPM_SOFTWARE_INT:
14642 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14643 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14644 uErrCode = uCr2 = 0;
14645 break;
14646
14647 case TRPM_TRAP:
14648 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14649 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14650 if (u8TrapNo == X86_XCPT_PF)
14651 fFlags |= IEM_XCPT_FLAGS_CR2;
14652 switch (u8TrapNo)
14653 {
14654 case X86_XCPT_DF:
14655 case X86_XCPT_TS:
14656 case X86_XCPT_NP:
14657 case X86_XCPT_SS:
14658 case X86_XCPT_PF:
14659 case X86_XCPT_AC:
14660 fFlags |= IEM_XCPT_FLAGS_ERR;
14661 break;
14662
14663 case X86_XCPT_NMI:
14664 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14665 break;
14666 }
14667 break;
14668
14669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14670 }
14671
14672 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14673
14674 if (pVCpu->iem.s.cActiveMappings > 0)
14675 iemMemRollback(pVCpu);
14676
14677 return rcStrict;
14678}
14679
14680
14681/**
14682 * Injects the active TRPM event.
14683 *
14684 * @returns Strict VBox status code.
14685 * @param pVCpu The cross context virtual CPU structure.
14686 */
14687VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14688{
14689#ifndef IEM_IMPLEMENTS_TASKSWITCH
14690 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14691#else
14692 uint8_t u8TrapNo;
14693 TRPMEVENT enmType;
14694 RTGCUINT uErrCode;
14695 RTGCUINTPTR uCr2;
14696 uint8_t cbInstr;
14697 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14698 if (RT_FAILURE(rc))
14699 return rc;
14700
14701 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14702# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14703 if (rcStrict == VINF_SVM_VMEXIT)
14704 rcStrict = VINF_SUCCESS;
14705# endif
14706
14707 /** @todo Are there any other codes that imply the event was successfully
14708 * delivered to the guest? See @bugref{6607}. */
14709 if ( rcStrict == VINF_SUCCESS
14710 || rcStrict == VINF_IEM_RAISED_XCPT)
14711 TRPMResetTrap(pVCpu);
14712
14713 return rcStrict;
14714#endif
14715}
14716
14717
14718VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14719{
14720 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14721 return VERR_NOT_IMPLEMENTED;
14722}
14723
14724
14725VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14726{
14727 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14728 return VERR_NOT_IMPLEMENTED;
14729}
14730
14731
14732#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14733/**
14734 * Executes an IRET instruction with the default operand size.
14735 *
14736 * This is for PATM.
14737 *
14738 * @returns VBox status code.
14739 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14740 * @param pCtxCore The register frame.
14741 */
14742VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14743{
14744 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14745
14746 iemCtxCoreToCtx(pCtx, pCtxCore);
14747 iemInitDecoder(pVCpu);
14748 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14749 if (rcStrict == VINF_SUCCESS)
14750 iemCtxToCtxCore(pCtxCore, pCtx);
14751 else
14752 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14753 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14754 return rcStrict;
14755}
14756#endif
14757
14758
14759/**
14760 * Macro used by the IEMExec* methods to check the given instruction length.
14761 *
14762 * Will return on failure!
14763 *
14764 * @param a_cbInstr The given instruction length.
14765 * @param a_cbMin The minimum length.
14766 */
14767#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14768 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14769 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
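/*
 * Editorial note (not part of the original source): the single unsigned compare
 * above folds both bounds into one test.  A more verbose but equivalent check
 * would be:
 *
 *     Assert((a_cbMin) <= (a_cbInstr) && (a_cbInstr) <= 15);
 *
 * where 15 is the architectural maximum x86 instruction length; since a_cbMin
 * is at least 1 at all call sites, a zero cbInstr wraps around in the
 * subtraction and is rejected together with over-long values.
 */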
14770
14771
14772/**
14773 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14774 *
14775 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14776 *
14777 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14779 * @param rcStrict The status code to fiddle.
14780 */
14781DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14782{
14783 iemUninitExec(pVCpu);
14784#ifdef IN_RC
14785 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14786#else
14787 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14788#endif
14789}
14790
14791
14792/**
14793 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14794 *
14795 * This API ASSUMES that the caller has already verified that the guest code is
14796 * allowed to access the I/O port. (The I/O port is in the DX register in the
14797 * guest state.)
14798 *
14799 * @returns Strict VBox status code.
14800 * @param pVCpu The cross context virtual CPU structure.
14801 * @param cbValue The size of the I/O port access (1, 2, or 4).
14802 * @param enmAddrMode The addressing mode.
14803 * @param fRepPrefix Indicates whether a repeat prefix is used
14804 * (doesn't matter which for this instruction).
14805 * @param cbInstr The instruction length in bytes.
14806 * @param iEffSeg The effective segment register number.
14807 * @param fIoChecked Whether the access to the I/O port has been
14808 * checked or not. It's typically checked in the
14809 * HM scenario.
14810 */
14811VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14812 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14813{
14814 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14815 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14816
14817 /*
14818 * State init.
14819 */
14820 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14821
14822 /*
14823 * Switch orgy for getting to the right handler.
14824 */
14825 VBOXSTRICTRC rcStrict;
14826 if (fRepPrefix)
14827 {
14828 switch (enmAddrMode)
14829 {
14830 case IEMMODE_16BIT:
14831 switch (cbValue)
14832 {
14833 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14834 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14835 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14836 default:
14837 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14838 }
14839 break;
14840
14841 case IEMMODE_32BIT:
14842 switch (cbValue)
14843 {
14844 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14845 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14846 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14847 default:
14848 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14849 }
14850 break;
14851
14852 case IEMMODE_64BIT:
14853 switch (cbValue)
14854 {
14855 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14856 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14857 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14858 default:
14859 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14860 }
14861 break;
14862
14863 default:
14864 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14865 }
14866 }
14867 else
14868 {
14869 switch (enmAddrMode)
14870 {
14871 case IEMMODE_16BIT:
14872 switch (cbValue)
14873 {
14874 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14875 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14876 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14877 default:
14878 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14879 }
14880 break;
14881
14882 case IEMMODE_32BIT:
14883 switch (cbValue)
14884 {
14885 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14887 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14888 default:
14889 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14890 }
14891 break;
14892
14893 case IEMMODE_64BIT:
14894 switch (cbValue)
14895 {
14896 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14897 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14898 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14899 default:
14900 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14901 }
14902 break;
14903
14904 default:
14905 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14906 }
14907 }
14908
14909 if (pVCpu->iem.s.cActiveMappings)
14910 iemMemRollback(pVCpu);
14911
14912 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14913}
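/*
 * Editorial usage sketch (not part of the original source): an HM exit handler
 * that has already decoded a REP OUTSB with 32-bit addressing and verified the
 * I/O port permissions could defer the work like this, with cbValue=1,
 * fRepPrefix=true, iEffSeg=DS and fIoChecked=true (cbInstr is assumed to come
 * from the caller's decoder / exit info):
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT,
 *                                                  true, cbInstr, X86_SREG_DS,
 *                                                  true);
 */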
14914
14915
14916/**
14917 * Interface for HM and EM for executing string I/O IN (read) instructions.
14918 *
14919 * This API ASSUMES that the caller has already verified that the guest code is
14920 * allowed to access the I/O port. (The I/O port is in the DX register in the
14921 * guest state.)
14922 *
14923 * @returns Strict VBox status code.
14924 * @param pVCpu The cross context virtual CPU structure.
14925 * @param cbValue The size of the I/O port access (1, 2, or 4).
14926 * @param enmAddrMode The addressing mode.
14927 * @param fRepPrefix Indicates whether a repeat prefix is used
14928 * (doesn't matter which for this instruction).
14929 * @param cbInstr The instruction length in bytes.
14930 * @param fIoChecked Whether the access to the I/O port has been
14931 * checked or not. It's typically checked in the
14932 * HM scenario.
14933 */
14934VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14935 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14936{
14937 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14938
14939 /*
14940 * State init.
14941 */
14942 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14943
14944 /*
14945 * Switch orgy for getting to the right handler.
14946 */
14947 VBOXSTRICTRC rcStrict;
14948 if (fRepPrefix)
14949 {
14950 switch (enmAddrMode)
14951 {
14952 case IEMMODE_16BIT:
14953 switch (cbValue)
14954 {
14955 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14956 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14957 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14958 default:
14959 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14960 }
14961 break;
14962
14963 case IEMMODE_32BIT:
14964 switch (cbValue)
14965 {
14966 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14967 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14968 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14969 default:
14970 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14971 }
14972 break;
14973
14974 case IEMMODE_64BIT:
14975 switch (cbValue)
14976 {
14977 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14978 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14979 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14980 default:
14981 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14982 }
14983 break;
14984
14985 default:
14986 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14987 }
14988 }
14989 else
14990 {
14991 switch (enmAddrMode)
14992 {
14993 case IEMMODE_16BIT:
14994 switch (cbValue)
14995 {
14996 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14997 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14998 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14999 default:
15000 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15001 }
15002 break;
15003
15004 case IEMMODE_32BIT:
15005 switch (cbValue)
15006 {
15007 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15008 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15009 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15010 default:
15011 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15012 }
15013 break;
15014
15015 case IEMMODE_64BIT:
15016 switch (cbValue)
15017 {
15018 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15019 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15020 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15021 default:
15022 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15023 }
15024 break;
15025
15026 default:
15027 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15028 }
15029 }
15030
15031 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15032 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15033}
15034
15035
15036/**
15037 * Interface for rawmode to execute an OUT instruction.
15038 *
15039 * @returns Strict VBox status code.
15040 * @param pVCpu The cross context virtual CPU structure.
15041 * @param cbInstr The instruction length in bytes.
15042 * @param u16Port The port to write to.
15043 * @param fImm Whether the port is specified using an immediate operand or
15044 * using the implicit DX register.
15045 * @param cbReg The register size.
15046 *
15047 * @remarks In ring-0 not all of the state needs to be synced in.
15048 */
15049VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15050{
15051 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15052 Assert(cbReg <= 4 && cbReg != 3);
15053
15054 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15055 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15056 Assert(!pVCpu->iem.s.cActiveMappings);
15057 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15058}
15059
15060
15061/**
15062 * Interface for rawmode to execute an IN instruction.
15063 *
15064 * @returns Strict VBox status code.
15065 * @param pVCpu The cross context virtual CPU structure.
15066 * @param cbInstr The instruction length in bytes.
15067 * @param u16Port The port to read.
15068 * @param fImm Whether the port is specified using an immediate operand or
15069 * using the implicit DX.
15070 * @param cbReg The register size.
15071 */
15072VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15073{
15074 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15075 Assert(cbReg <= 4 && cbReg != 3);
15076
15077 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15078 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15079 Assert(!pVCpu->iem.s.cActiveMappings);
15080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15081}
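/*
 * Editorial usage sketch (not part of the original source): a caller that has
 * decoded "out dx, al" could defer it to IEM with fImm=false (port taken from
 * DX) and cbReg=1, and symmetrically use IEMExecDecodedIn() for "in al, dx";
 * u16Port is assumed to have been read from the guest DX register by the
 * caller:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false, 1);
 */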
15082
15083
15084/**
15085 * Interface for HM and EM to write to a CRx register.
15086 *
15087 * @returns Strict VBox status code.
15088 * @param pVCpu The cross context virtual CPU structure.
15089 * @param cbInstr The instruction length in bytes.
15090 * @param iCrReg The control register number (destination).
15091 * @param iGReg The general purpose register number (source).
15092 *
15093 * @remarks In ring-0 not all of the state needs to be synced in.
15094 */
15095VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15096{
15097 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15098 Assert(iCrReg < 16);
15099 Assert(iGReg < 16);
15100
15101 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15102 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15103 Assert(!pVCpu->iem.s.cActiveMappings);
15104 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15105}
15106
15107
15108/**
15109 * Interface for HM and EM to read from a CRx register.
15110 *
15111 * @returns Strict VBox status code.
15112 * @param pVCpu The cross context virtual CPU structure.
15113 * @param cbInstr The instruction length in bytes.
15114 * @param iGReg The general purpose register number (destination).
15115 * @param iCrReg The control register number (source).
15116 *
15117 * @remarks In ring-0 not all of the state needs to be synced in.
15118 */
15119VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15120{
15121 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15122 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15123 | CPUMCTX_EXTRN_APIC_TPR);
15124 Assert(iCrReg < 16);
15125 Assert(iGReg < 16);
15126
15127 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15129 Assert(!pVCpu->iem.s.cActiveMappings);
15130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15131}
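/*
 * Editorial usage sketch (not part of the original source): an HM handler for a
 * "MOV CR3, reg" intercept could use the write interface above with iCrReg=3,
 * where iGReg has been extracted from the hardware exit qualification by the
 * caller:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, iGReg);
 */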
15132
15133
15134/**
15135 * Interface for HM and EM to clear the CR0[TS] bit.
15136 *
15137 * @returns Strict VBox status code.
15138 * @param pVCpu The cross context virtual CPU structure.
15139 * @param cbInstr The instruction length in bytes.
15140 *
15141 * @remarks In ring-0 not all of the state needs to be synced in.
15142 */
15143VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15144{
15145 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15146
15147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15149 Assert(!pVCpu->iem.s.cActiveMappings);
15150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15151}
15152
15153
15154/**
15155 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15156 *
15157 * @returns Strict VBox status code.
15158 * @param pVCpu The cross context virtual CPU structure.
15159 * @param cbInstr The instruction length in bytes.
15160 * @param uValue The value to load into CR0.
15161 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15162 * memory operand. Otherwise pass NIL_RTGCPTR.
15163 *
15164 * @remarks In ring-0 not all of the state needs to be synced in.
15165 */
15166VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15167{
15168 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15169
15170 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15171 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15172 Assert(!pVCpu->iem.s.cActiveMappings);
15173 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15174}
15175
15176
15177/**
15178 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15179 *
15180 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15181 *
15182 * @returns Strict VBox status code.
15183 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15184 * @param cbInstr The instruction length in bytes.
15185 * @remarks In ring-0 not all of the state needs to be synced in.
15186 * @thread EMT(pVCpu)
15187 */
15188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15189{
15190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15191
15192 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15194 Assert(!pVCpu->iem.s.cActiveMappings);
15195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15196}
15197
15198
15199/**
15200 * Interface for HM and EM to emulate the WBINVD instruction.
15201 *
15202 * @returns Strict VBox status code.
15203 * @param pVCpu The cross context virtual CPU structure.
15204 * @param cbInstr The instruction length in bytes.
15205 *
15206 * @remarks In ring-0 not all of the state needs to be synced in.
15207 */
15208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15209{
15210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15211
15212 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15214 Assert(!pVCpu->iem.s.cActiveMappings);
15215 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15216}
15217
15218
15219/**
15220 * Interface for HM and EM to emulate the INVD instruction.
15221 *
15222 * @returns Strict VBox status code.
15223 * @param pVCpu The cross context virtual CPU structure.
15224 * @param cbInstr The instruction length in bytes.
15225 *
15226 * @remarks In ring-0 not all of the state needs to be synced in.
15227 */
15228VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15229{
15230 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15231
15232 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15233 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15234 Assert(!pVCpu->iem.s.cActiveMappings);
15235 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15236}
15237
15238
15239/**
15240 * Interface for HM and EM to emulate the INVLPG instruction.
15241 *
15242 * @returns Strict VBox status code.
15243 * @retval VINF_PGM_SYNC_CR3
15244 *
15245 * @param pVCpu The cross context virtual CPU structure.
15246 * @param cbInstr The instruction length in bytes.
15247 * @param GCPtrPage The effective address of the page to invalidate.
15248 *
15249 * @remarks In ring-0 not all of the state needs to be synced in.
15250 */
15251VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15252{
15253 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15254
15255 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15256 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15257 Assert(!pVCpu->iem.s.cActiveMappings);
15258 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15259}
15260
15261
15262/**
15263 * Interface for HM and EM to emulate the CPUID instruction.
15264 *
15265 * @returns Strict VBox status code.
15266 *
15267 * @param pVCpu The cross context virtual CPU structure.
15268 * @param cbInstr The instruction length in bytes.
15269 *
15270 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15275 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15276
15277 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15278 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15279 Assert(!pVCpu->iem.s.cActiveMappings);
15280 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15281}
15282
15283
15284/**
15285 * Interface for HM and EM to emulate the RDPMC instruction.
15286 *
15287 * @returns Strict VBox status code.
15288 *
15289 * @param pVCpu The cross context virtual CPU structure.
15290 * @param cbInstr The instruction length in bytes.
15291 *
15292 * @remarks Not all of the state needs to be synced in.
15293 */
15294VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15295{
15296 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15297 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15298
15299 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15300 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15301 Assert(!pVCpu->iem.s.cActiveMappings);
15302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15303}
15304
15305
15306/**
15307 * Interface for HM and EM to emulate the RDTSC instruction.
15308 *
15309 * @returns Strict VBox status code.
15310 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15311 *
15312 * @param pVCpu The cross context virtual CPU structure.
15313 * @param cbInstr The instruction length in bytes.
15314 *
15315 * @remarks Not all of the state needs to be synced in.
15316 */
15317VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15318{
15319 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15320 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15321
15322 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15323 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15324 Assert(!pVCpu->iem.s.cActiveMappings);
15325 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15326}
15327
15328
15329/**
15330 * Interface for HM and EM to emulate the RDTSCP instruction.
15331 *
15332 * @returns Strict VBox status code.
15333 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15334 *
15335 * @param pVCpu The cross context virtual CPU structure.
15336 * @param cbInstr The instruction length in bytes.
15337 *
15338 * @remarks Not all of the state needs to be synced in. Recommended
15339 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15340 */
15341VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15342{
15343 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15344 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15345
15346 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15347 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15348 Assert(!pVCpu->iem.s.cActiveMappings);
15349 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15350}
15351
15352
15353/**
15354 * Interface for HM and EM to emulate the RDMSR instruction.
15355 *
15356 * @returns Strict VBox status code.
15357 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15358 *
15359 * @param pVCpu The cross context virtual CPU structure.
15360 * @param cbInstr The instruction length in bytes.
15361 *
15362 * @remarks Not all of the state needs to be synced in. Requires RCX and
15363 * (currently) all MSRs.
15364 */
15365VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15366{
15367 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15368 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15369
15370 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15371 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15372 Assert(!pVCpu->iem.s.cActiveMappings);
15373 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15374}
15375
15376
15377/**
15378 * Interface for HM and EM to emulate the WRMSR instruction.
15379 *
15380 * @returns Strict VBox status code.
15381 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15382 *
15383 * @param pVCpu The cross context virtual CPU structure.
15384 * @param cbInstr The instruction length in bytes.
15385 *
15386 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15387 * and (currently) all MSRs.
15388 */
15389VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15390{
15391 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15392 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15393 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15394
15395 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15396 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15397 Assert(!pVCpu->iem.s.cActiveMappings);
15398 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15399}
15400
15401
15402/**
15403 * Interface for HM and EM to emulate the MONITOR instruction.
15404 *
15405 * @returns Strict VBox status code.
15406 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15407 *
15408 * @param pVCpu The cross context virtual CPU structure.
15409 * @param cbInstr The instruction length in bytes.
15410 *
15411 * @remarks Not all of the state needs to be synced in.
15412 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15413 * are used.
15414 */
15415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15416{
15417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15418 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15419
15420 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15421 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15422 Assert(!pVCpu->iem.s.cActiveMappings);
15423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15424}
15425
15426
15427/**
15428 * Interface for HM and EM to emulate the MWAIT instruction.
15429 *
15430 * @returns Strict VBox status code.
15431 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15432 *
15433 * @param pVCpu The cross context virtual CPU structure.
15434 * @param cbInstr The instruction length in bytes.
15435 *
15436 * @remarks Not all of the state needs to be synced in.
15437 */
15438VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15439{
15440 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15441
15442 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15443 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15444 Assert(!pVCpu->iem.s.cActiveMappings);
15445 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15446}
15447
15448
15449/**
15450 * Interface for HM and EM to emulate the HLT instruction.
15451 *
15452 * @returns Strict VBox status code.
15453 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15454 *
15455 * @param pVCpu The cross context virtual CPU structure.
15456 * @param cbInstr The instruction length in bytes.
15457 *
15458 * @remarks Not all of the state needs to be synced in.
15459 */
15460VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15461{
15462 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15463
15464 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15465 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15466 Assert(!pVCpu->iem.s.cActiveMappings);
15467 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15468}
15469
15470
15471/**
15472 * Checks if IEM is in the process of delivering an event (interrupt or
15473 * exception).
15474 *
15475 * @returns true if we're in the process of raising an interrupt or exception,
15476 * false otherwise.
15477 * @param pVCpu The cross context virtual CPU structure.
15478 * @param puVector Where to store the vector associated with the
15479 * currently delivered event, optional.
15480 * @param pfFlags Where to store the event delivery flags (see
15481 * IEM_XCPT_FLAGS_XXX), optional.
15482 * @param puErr Where to store the error code associated with the
15483 * event, optional.
15484 * @param puCr2 Where to store the CR2 associated with the event,
15485 * optional.
15486 * @remarks The caller should check the flags to determine if the error code and
15487 * CR2 are valid for the event.
15488 */
15489VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15490{
15491 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15492 if (fRaisingXcpt)
15493 {
15494 if (puVector)
15495 *puVector = pVCpu->iem.s.uCurXcpt;
15496 if (pfFlags)
15497 *pfFlags = pVCpu->iem.s.fCurXcpt;
15498 if (puErr)
15499 *puErr = pVCpu->iem.s.uCurXcptErr;
15500 if (puCr2)
15501 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15502 }
15503 return fRaisingXcpt;
15504}
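/*
 * Editorial usage sketch (not part of the original source): event-injection code
 * can use this to detect that IEM is already in the middle of delivering an
 * exception before deciding how to merge a new one:
 *
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErr;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         Log(("Busy delivering vector %#x fFlags=%#x\n", uVector, fFlags));
 */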
15505
15506#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15507
15508/**
15509 * Interface for HM and EM to emulate the CLGI instruction.
15510 *
15511 * @returns Strict VBox status code.
15512 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15513 * @param cbInstr The instruction length in bytes.
15514 * @thread EMT(pVCpu)
15515 */
15516VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15517{
15518 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15519
15520 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15521 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15522 Assert(!pVCpu->iem.s.cActiveMappings);
15523 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15524}
15525
15526
15527/**
15528 * Interface for HM and EM to emulate the STGI instruction.
15529 *
15530 * @returns Strict VBox status code.
15531 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15532 * @param cbInstr The instruction length in bytes.
15533 * @thread EMT(pVCpu)
15534 */
15535VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15536{
15537 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15538
15539 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15540 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15541 Assert(!pVCpu->iem.s.cActiveMappings);
15542 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15543}
15544
15545
15546/**
15547 * Interface for HM and EM to emulate the VMLOAD instruction.
15548 *
15549 * @returns Strict VBox status code.
15550 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15551 * @param cbInstr The instruction length in bytes.
15552 * @thread EMT(pVCpu)
15553 */
15554VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15555{
15556 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15557
15558 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15559 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15560 Assert(!pVCpu->iem.s.cActiveMappings);
15561 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15562}
15563
15564
15565/**
15566 * Interface for HM and EM to emulate the VMSAVE instruction.
15567 *
15568 * @returns Strict VBox status code.
15569 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15570 * @param cbInstr The instruction length in bytes.
15571 * @thread EMT(pVCpu)
15572 */
15573VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15574{
15575 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15576
15577 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15578 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15579 Assert(!pVCpu->iem.s.cActiveMappings);
15580 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15581}
15582
15583
15584/**
15585 * Interface for HM and EM to emulate the INVLPGA instruction.
15586 *
15587 * @returns Strict VBox status code.
15588 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15589 * @param cbInstr The instruction length in bytes.
15590 * @thread EMT(pVCpu)
15591 */
15592VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15593{
15594 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15595
15596 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15597 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15598 Assert(!pVCpu->iem.s.cActiveMappings);
15599 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15600}
15601
15602
15603/**
15604 * Interface for HM and EM to emulate the VMRUN instruction.
15605 *
15606 * @returns Strict VBox status code.
15607 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15608 * @param cbInstr The instruction length in bytes.
15609 * @thread EMT(pVCpu)
15610 */
15611VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15612{
15613 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15614 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15615
15616 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15618 Assert(!pVCpu->iem.s.cActiveMappings);
15619 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15620}
15621
15622
15623/**
15624 * Interface for HM and EM to emulate \#VMEXIT.
15625 *
15626 * @returns Strict VBox status code.
15627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15628 * @param uExitCode The exit code.
15629 * @param uExitInfo1 The exit info. 1 field.
15630 * @param uExitInfo2 The exit info. 2 field.
15631 * @thread EMT(pVCpu)
15632 */
15633VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15634{
15635 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15636 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15637 if (pVCpu->iem.s.cActiveMappings)
15638 iemMemRollback(pVCpu);
15639 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15640}
15641
15642#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15643
15644#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15645
15646/**
15647 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15648 *
15649 * @returns Strict VBox status code.
15650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15651 * @thread EMT(pVCpu)
15652 */
15653VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15654{
15655 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15656 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15657 if (pVCpu->iem.s.cActiveMappings)
15658 iemMemRollback(pVCpu);
15659 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15660}
15661
15662
15663/**
15664 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15665 *
15666 * @returns Strict VBox status code.
15667 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15668 * @param uVector The external interrupt vector.
15669 * @param fIntPending Whether the external interrupt is pending or
15670 * acknowledged in the interrupt controller.
15671 * @thread EMT(pVCpu)
15672 */
15673VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15674{
15675 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15676 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15677 if (pVCpu->iem.s.cActiveMappings)
15678 iemMemRollback(pVCpu);
15679 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15680}
15681
15682
15683/**
15684 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15685 *
15686 * @returns Strict VBox status code.
15687 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15688 * @param uVector The SIPI vector.
15689 * @thread EMT(pVCpu)
15690 */
15691VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15692{
15693 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15694 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15695 if (pVCpu->iem.s.cActiveMappings)
15696 iemMemRollback(pVCpu);
15697 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15698}
15699
15700
15701/**
15702 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15703 *
15704 * @returns Strict VBox status code.
15705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15706 * @thread EMT(pVCpu)
15707 */
15708VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15709{
15710 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15711 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15712 if (pVCpu->iem.s.cActiveMappings)
15713 iemMemRollback(pVCpu);
15714 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15715}
15716
15717
15718/**
15719 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15720 *
15721 * @returns Strict VBox status code.
15722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15725 *
15726 * @thread EMT(pVCpu)
15727 */
15728VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15729{
15730 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15731 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15732 if (pVCpu->iem.s.cActiveMappings)
15733 iemMemRollback(pVCpu);
15734 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15735}
15736
15737
15738/**
15739 * Interface for HM and EM to emulate the VMREAD instruction.
15740 *
15741 * @returns Strict VBox status code.
15742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15743 * @param pExitInfo Pointer to the VM-exit information struct.
15744 * @thread EMT(pVCpu)
15745 */
15746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15747{
15748 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15749 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15750 Assert(pExitInfo);
15751
15752 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15753
15754 VBOXSTRICTRC rcStrict;
15755 uint8_t const cbInstr = pExitInfo->cbInstr;
15756 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15757 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15758 {
15759 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15760 {
15761 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15762 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15763 }
15764 else
15765 {
15766 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15767 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15768 }
15769 }
15770 else
15771 {
15772 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15773 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15774 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15775 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15776 }
15777 if (pVCpu->iem.s.cActiveMappings)
15778 iemMemRollback(pVCpu);
15779 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15780}
15781
15782
15783/**
15784 * Interface for HM and EM to emulate the VMWRITE instruction.
15785 *
15786 * @returns Strict VBox status code.
15787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15788 * @param pExitInfo Pointer to the VM-exit information struct.
15789 * @thread EMT(pVCpu)
15790 */
15791VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15792{
15793 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15794 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15795 Assert(pExitInfo);
15796
15797 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15798
15799 uint64_t u64Val;
15800 uint8_t iEffSeg;
15801 IEMMODE enmEffAddrMode;
15802 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15803 {
15804 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15805 iEffSeg = UINT8_MAX;
15806 enmEffAddrMode = UINT8_MAX;
15807 }
15808 else
15809 {
15810 u64Val = pExitInfo->GCPtrEffAddr;
15811 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15812 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15813 }
15814 uint8_t const cbInstr = pExitInfo->cbInstr;
15815 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15816 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15817 if (pVCpu->iem.s.cActiveMappings)
15818 iemMemRollback(pVCpu);
15819 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15820}
15821
15822
15823/**
15824 * Interface for HM and EM to emulate the VMPTRLD instruction.
15825 *
15826 * @returns Strict VBox status code.
15827 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15828 * @param pExitInfo Pointer to the VM-exit information struct.
15829 * @thread EMT(pVCpu)
15830 */
15831VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15832{
15833 Assert(pExitInfo);
15834 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15835 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15836
15837 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15838
15839 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15840 uint8_t const cbInstr = pExitInfo->cbInstr;
15841 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15842 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15843 if (pVCpu->iem.s.cActiveMappings)
15844 iemMemRollback(pVCpu);
15845 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15846}
15847
15848
15849/**
15850 * Interface for HM and EM to emulate the VMPTRST instruction.
15851 *
15852 * @returns Strict VBox status code.
15853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15854 * @param pExitInfo Pointer to the VM-exit information struct.
15855 * @thread EMT(pVCpu)
15856 */
15857VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15858{
15859 Assert(pExitInfo);
15860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15861 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15862
15863 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15864
15865 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15866 uint8_t const cbInstr = pExitInfo->cbInstr;
15867 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15868 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15869 if (pVCpu->iem.s.cActiveMappings)
15870 iemMemRollback(pVCpu);
15871 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15872}
15873
15874
15875/**
15876 * Interface for HM and EM to emulate the VMCLEAR instruction.
15877 *
15878 * @returns Strict VBox status code.
15879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15880 * @param pExitInfo Pointer to the VM-exit information struct.
15881 * @thread EMT(pVCpu)
15882 */
15883VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15884{
15885 Assert(pExitInfo);
15886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15888
15889 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15890
15891 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15892 uint8_t const cbInstr = pExitInfo->cbInstr;
15893 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15894 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15895 if (pVCpu->iem.s.cActiveMappings)
15896 iemMemRollback(pVCpu);
15897 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15898}
15899
15900
15901/**
15902 * Interface for HM and EM to emulate the VMXON instruction.
15903 *
15904 * @returns Strict VBox status code.
15905 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15906 * @param pExitInfo Pointer to the VM-exit information struct.
15907 * @thread EMT(pVCpu)
15908 */
15909VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15910{
15911 Assert(pExitInfo);
15912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15913 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15914
15915 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15916
15917 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15918 uint8_t const cbInstr = pExitInfo->cbInstr;
15919 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15920 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15921 if (pVCpu->iem.s.cActiveMappings)
15922 iemMemRollback(pVCpu);
15923 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15924}
15925
15926
15927/**
15928 * Interface for HM and EM to emulate the VMXOFF instruction.
15929 *
15930 * @returns Strict VBox status code.
15931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15932 * @param cbInstr The instruction length in bytes.
15933 * @thread EMT(pVCpu)
15934 */
15935VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15936{
15937 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15938 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HM_VMX_MASK);
15939
15940 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15941 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15942 Assert(!pVCpu->iem.s.cActiveMappings);
15943 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15944}
15945
15946#endif
15947
15948#ifdef IN_RING3
15949
15950/**
15951 * Handles the unlikely and probably fatal merge cases.
15952 *
15953 * @returns Merged status code.
15954 * @param rcStrict Current EM status code.
15955 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15956 * with @a rcStrict.
15957 * @param iMemMap The memory mapping index. For error reporting only.
15958 * @param pVCpu The cross context virtual CPU structure of the calling
15959 * thread, for error reporting only.
15960 */
15961DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15962 unsigned iMemMap, PVMCPU pVCpu)
15963{
15964 if (RT_FAILURE_NP(rcStrict))
15965 return rcStrict;
15966
15967 if (RT_FAILURE_NP(rcStrictCommit))
15968 return rcStrictCommit;
15969
15970 if (rcStrict == rcStrictCommit)
15971 return rcStrictCommit;
15972
15973 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15974 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15975 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15976 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15977 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15978 return VERR_IOM_FF_STATUS_IPE;
15979}
15980
15981
15982/**
15983 * Helper for IOMR3ProcessForceFlag.
15984 *
15985 * @returns Merged status code.
15986 * @param rcStrict Current EM status code.
15987 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15988 * with @a rcStrict.
15989 * @param iMemMap The memory mapping index. For error reporting only.
15990 * @param pVCpu The cross context virtual CPU structure of the calling
15991 * thread, for error reporting only.
15992 */
15993DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15994{
15995 /* Simple. */
15996 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15997 return rcStrictCommit;
15998
15999 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16000 return rcStrict;
16001
16002 /* EM scheduling status codes. */
16003 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16004 && rcStrict <= VINF_EM_LAST))
16005 {
16006 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16007 && rcStrictCommit <= VINF_EM_LAST))
16008 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16009 }
16010
16011 /* Unlikely */
16012 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16013}
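/*
 * Editorial note (not part of the original source): the merge rules above come
 * down to the following, in order:
 *   - EM side is VINF_SUCCESS or VINF_EM_RAW_TO_R3: the commit status wins.
 *   - Commit side is VINF_SUCCESS: the EM status wins.
 *   - Both are EM scheduling codes (VINF_EM_FIRST..VINF_EM_LAST): the smaller,
 *     i.e. higher priority, value wins.
 *   - Anything else (failures or other informational codes) is handed to
 *     iemR3MergeStatusSlow() above.
 */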
16014
16015
16016/**
16017 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16018 *
16019 * @returns Merge between @a rcStrict and what the commit operation returned.
16020 * @param pVM The cross context VM structure.
16021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16022 * @param rcStrict The status code returned by ring-0 or raw-mode.
16023 */
16024VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16025{
16026 /*
16027 * Reset the pending commit.
16028 */
16029 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16030 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16031 ("%#x %#x %#x\n",
16032 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16033 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16034
16035 /*
16036 * Commit the pending bounce buffers (usually just one).
16037 */
16038 unsigned cBufs = 0;
16039 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16040 while (iMemMap-- > 0)
16041 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16042 {
16043 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16044 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16045 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16046
16047 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16048 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16049 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16050
16051 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16052 {
16053 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16054 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16055 pbBuf,
16056 cbFirst,
16057 PGMACCESSORIGIN_IEM);
16058 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16059 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16060 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16061 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16062 }
16063
16064 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16065 {
16066 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16067 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16068 pbBuf + cbFirst,
16069 cbSecond,
16070 PGMACCESSORIGIN_IEM);
16071 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16072 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16073 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16074 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16075 }
16076 cBufs++;
16077 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16078 }
16079
16080 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16081 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16082 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16083 pVCpu->iem.s.cActiveMappings = 0;
16084 return rcStrict;
16085}
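/*
 * Editorial usage sketch (not part of the original source): the ring-3 EM loop
 * is the expected caller here, roughly along these lines after returning from
 * ring-0 or raw-mode execution (the function clears VMCPU_FF_IEM itself):
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */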
16086
16087#endif /* IN_RING3 */
16088