VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@75620

Last change on this file since 75620 was 75620, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 APIC-write emulation bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 640.6 KB
1/* $Id: IEMAll.cpp 75620 2018-11-20 14:42:07Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
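/*
 * Usage sketch for the level table above (illustrative only; the format string
 * and arguments are made up for the example).  A level 4 decode trace maps
 * onto the corresponding LogN macro from VBox/log.h:
 *
 *      Log4(("decode - %04x:%08RX64 %s\n", uCsSel, uRip, pszMnemonic));
 */
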
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/asm-math.h>
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125
126/*********************************************************************************************************************************
127* Structures and Typedefs *
128*********************************************************************************************************************************/
129/** @typedef PFNIEMOP
130 * Pointer to an opcode decoder function.
131 */
132
133/** @def FNIEMOP_DEF
134 * Define an opcode decoder function.
135 *
136 * We're using macros for this so that adding and removing parameters as well as
137 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
138 *
139 * @param a_Name The function name.
140 */
141
142/** @typedef PFNIEMOPRM
143 * Pointer to an opcode decoder function with RM byte.
144 */
145
146/** @def FNIEMOPRM_DEF
147 * Define an opcode decoder function with RM byte.
148 *
149 * We're using macros for this so that adding and removing parameters as well as
150 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
151 *
152 * @param a_Name The function name.
153 */
154
155#if defined(__GNUC__) && defined(RT_ARCH_X86)
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
157typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
158# define FNIEMOP_DEF(a_Name) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
160# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
162# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
163 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
164
165#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
167typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#elif defined(__GNUC__)
176typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
177typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
178# define FNIEMOP_DEF(a_Name) \
179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
180# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
181 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
182# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
183 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
184
185#else
186typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
187typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
188# define FNIEMOP_DEF(a_Name) \
189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
190# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
191 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
192# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
193 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
194
195#endif
196#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
197
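/*
 * Usage sketch for the FNIEMOP_DEF machinery above (illustrative only; the
 * function name iemOp_example_stub is hypothetical).  Real decoder bodies live
 * in the IEMAllInstructions*.cpp.h includes; a stub that merely reports the
 * aspect as unimplemented (see IEM_RETURN_ASPECT_NOT_IMPLEMENTED below) would
 * look like:
 *
 *      FNIEMOP_DEF(iemOp_example_stub)
 *      {
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *      }
 */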
198
199/**
200 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
201 */
202typedef union IEMSELDESC
203{
204 /** The legacy view. */
205 X86DESC Legacy;
206 /** The long mode view. */
207 X86DESC64 Long;
208} IEMSELDESC;
209/** Pointer to a selector descriptor table entry. */
210typedef IEMSELDESC *PIEMSELDESC;
211
212/**
213 * CPU exception classes.
214 */
215typedef enum IEMXCPTCLASS
216{
217 IEMXCPTCLASS_BENIGN,
218 IEMXCPTCLASS_CONTRIBUTORY,
219 IEMXCPTCLASS_PAGE_FAULT,
220 IEMXCPTCLASS_DOUBLE_FAULT
221} IEMXCPTCLASS;
222
223
224/*********************************************************************************************************************************
225* Defined Constants And Macros *
226*********************************************************************************************************************************/
227/** @def IEM_WITH_SETJMP
228 * Enables alternative status code handling using setjmps.
229 *
230 * This adds a bit of expense via the setjmp() call since it saves all the
231 * non-volatile registers. However, it eliminates return code checks and allows
232 * for more optimal return value passing (return regs instead of stack buffer).
233 */
234#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
235# define IEM_WITH_SETJMP
236#endif
237
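/*
 * Rough sketch of the pattern IEM_WITH_SETJMP enables (a simplified
 * assumption; the real wiring lives in the *Jmp raise helpers declared further
 * down, e.g. iemRaisePageFaultJmp, and in the execution loop):
 *
 *      jmp_buf JmpBuf;
 *      int rc = setjmp(JmpBuf);
 *      if (rc == 0)
 *          doWorkThatMayLongjmp();     (hypothetical worker)
 *      else
 *          handleStatus(rc);           (rc is the value passed to longjmp)
 *
 * Exceptional paths bail out with longjmp() instead of threading a
 * VBOXSTRICTRC return value through every caller.
 */
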
238/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
239 * due to GCC lacking knowledge about the value range of a switch. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
241
242/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
244
245/**
246 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
247 * occasion.
248 */
249#ifdef LOG_ENABLED
250# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
251 do { \
252 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
254 } while (0)
255#else
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
257 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
258#endif
259
260/**
261 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
262 * occasion using the supplied logger statement.
263 *
264 * @param a_LoggerArgs What to log on failure.
265 */
266#ifdef LOG_ENABLED
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
268 do { \
269 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
270 /*LogFunc(a_LoggerArgs);*/ \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
272 } while (0)
273#else
274# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
275 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
276#endif
277
278/**
279 * Call an opcode decoder function.
280 *
281 * We're using macros for this so that adding and removing parameters can be
282 * done as we please. See FNIEMOP_DEF.
283 */
284#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
285
286/**
287 * Call a common opcode decoder function taking one extra argument.
288 *
289 * We're using macros for this so that adding and removing parameters can be
290 * done as we please. See FNIEMOP_DEF_1.
291 */
292#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
293
294/**
295 * Call a common opcode decoder function taking two extra arguments.
296 *
297 * We're using macros for this so that adding and removing parameters can be
298 * done as we please. See FNIEMOP_DEF_2.
299 */
300#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
301
302/**
303 * Check if we're currently executing in real or virtual 8086 mode.
304 *
305 * @returns @c true if it is, @c false if not.
306 * @param a_pVCpu The IEM state of the current CPU.
307 */
308#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
309
310/**
311 * Check if we're currently executing in virtual 8086 mode.
312 *
313 * @returns @c true if it is, @c false if not.
314 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
315 */
316#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
317
318/**
319 * Check if we're currently executing in long mode.
320 *
321 * @returns @c true if it is, @c false if not.
322 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
323 */
324#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
325
326/**
327 * Check if we're currently executing in a 64-bit code segment.
328 *
329 * @returns @c true if it is, @c false if not.
330 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
331 */
332#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
333
334/**
335 * Check if we're currently executing in real mode.
336 *
337 * @returns @c true if it is, @c false if not.
338 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
339 */
340#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
341
342/**
343 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
344 * @returns PCCPUMFEATURES
345 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
346 */
347#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
348
349/**
350 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
351 * @returns PCCPUMFEATURES
352 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
353 */
354#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
355
356/**
357 * Evaluates to true if we're presenting an Intel CPU to the guest.
358 */
359#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
360
361/**
362 * Evaluates to true if we're presenting an AMD CPU to the guest.
363 */
364#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
365
366/**
367 * Check if the address is canonical.
368 */
369#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
370
371/**
372 * Gets the effective VEX.VVVV value.
373 *
374 * The 4th bit is ignored if not 64-bit code.
375 * @returns effective V-register value.
376 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
377 */
378#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
379 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
380
381/** @def IEM_USE_UNALIGNED_DATA_ACCESS
382 * Use unaligned accesses instead of elaborate byte assembly. */
383#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
384# define IEM_USE_UNALIGNED_DATA_ACCESS
385#endif
386
387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
388
389/**
390 * Check if the guest has entered VMX root operation.
391 */
392# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
393
394/**
395 * Check if the guest has entered VMX non-root operation.
396 */
397# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
398
399/**
400 * Check if the nested-guest has the given Pin-based VM-execution control set.
401 */
402# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
403 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
404
405/**
406 * Check if the nested-guest has the given Processor-based VM-execution control set.
407 */
408#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
409 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
410
411/**
412 * Check if the nested-guest has the given Secondary Processor-based VM-execution
413 * control set.
414 */
415#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
416 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
417
418/**
419 * Invokes the VMX VM-exit handler for an instruction intercept.
420 */
421# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
422 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
423
424/**
425 * Invokes the VMX VM-exit handler for an instruction intercept where the
426 * instruction provides additional VM-exit information.
427 */
428# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
429 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
430
431/**
432 * Invokes the VMX VM-exit handler for a task switch.
433 */
434# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
435 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
436
437/**
438 * Invokes the VMX VM-exit handler for MWAIT.
439 */
440# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
441 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
442
443/**
444 * Invokes the VMX VM-exit handler for triple faults.
445 */
446# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
447 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
448
449#else
450# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
451# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
452# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
453# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
454# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
455# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
458# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
460
461#endif
462
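/*
 * Usage sketch for the VMX intercept macros above (illustrative assumption;
 * the control flag and exit reason constants shown are examples only).  A
 * typical instruction helper checks the relevant execution control and exits
 * to the nested hypervisor when it is set:
 *
 *      if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *          && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
 */
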
463#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
464/**
465 * Check if an SVM control/instruction intercept is set.
466 */
467# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
468 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
469
470/**
471 * Check if an SVM read CRx intercept is set.
472 */
473# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
474 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
475
476/**
477 * Check if an SVM write CRx intercept is set.
478 */
479# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
480 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
481
482/**
483 * Check if an SVM read DRx intercept is set.
484 */
485# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
486 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
487
488/**
489 * Check if an SVM write DRx intercept is set.
490 */
491# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
492 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
493
494/**
495 * Check if an SVM exception intercept is set.
496 */
497# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
498 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
499
500/**
501 * Invokes the SVM \#VMEXIT handler for the nested-guest.
502 */
503# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
504 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
505
506/**
507 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
508 * corresponding decode assist information.
509 */
510# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
511 do \
512 { \
513 uint64_t uExitInfo1; \
514 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
515 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
516 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
517 else \
518 uExitInfo1 = 0; \
519 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
520 } while (0)
521
522/** Checks and handles the SVM nested-guest instruction intercept and updates
523 * the NRIP if needed.
524 */
525# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
526 do \
527 { \
528 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
529 { \
530 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
531 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
532 } \
533 } while (0)
534
535/** Checks and handles SVM nested-guest CR0 read intercept. */
536# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
537 do \
538 { \
539 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
540 { /* probably likely */ } \
541 else \
542 { \
543 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
544 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
545 } \
546 } while (0)
547
548/**
549 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
550 */
551# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
552 do { \
553 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
554 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
555 } while (0)
556
557#else
558# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
559# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
560# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
561# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
562# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
563# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
564# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
565# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
566# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
567# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
568# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
569
570#endif
571
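/*
 * Usage sketch for the SVM intercept macros above (illustrative assumption;
 * the intercept and exit code constants shown are examples only).  A typical
 * instruction helper, e.g. for RDTSC, would check and exit like this, the two
 * zeros being uExitInfo1 and uExitInfo2:
 *
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC,
 *                                    SVM_EXIT_RDTSC, 0, 0);
 */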
572
573/*********************************************************************************************************************************
574* Global Variables *
575*********************************************************************************************************************************/
576extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
577
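/*
 * Dispatch sketch (a simplified assumption about the decode loop): the first
 * opcode byte indexes the table above and the handler is invoked through the
 * FNIEMOP_CALL macro defined earlier; IEM_OPCODE_GET_NEXT_U8 is defined
 * further down in this file:
 *
 *      uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */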
578
579/** Function table for the ADD instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
581{
582 iemAImpl_add_u8, iemAImpl_add_u8_locked,
583 iemAImpl_add_u16, iemAImpl_add_u16_locked,
584 iemAImpl_add_u32, iemAImpl_add_u32_locked,
585 iemAImpl_add_u64, iemAImpl_add_u64_locked
586};
587
588/** Function table for the ADC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
590{
591 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
592 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
593 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
594 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
595};
596
597/** Function table for the SUB instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
599{
600 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
601 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
602 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
603 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
604};
605
606/** Function table for the SBB instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
608{
609 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
610 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
611 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
612 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
613};
614
615/** Function table for the OR instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
617{
618 iemAImpl_or_u8, iemAImpl_or_u8_locked,
619 iemAImpl_or_u16, iemAImpl_or_u16_locked,
620 iemAImpl_or_u32, iemAImpl_or_u32_locked,
621 iemAImpl_or_u64, iemAImpl_or_u64_locked
622};
623
624/** Function table for the XOR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
626{
627 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
628 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
629 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
630 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
631};
632
633/** Function table for the AND instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
635{
636 iemAImpl_and_u8, iemAImpl_and_u8_locked,
637 iemAImpl_and_u16, iemAImpl_and_u16_locked,
638 iemAImpl_and_u32, iemAImpl_and_u32_locked,
639 iemAImpl_and_u64, iemAImpl_and_u64_locked
640};
641
642/** Function table for the CMP instruction.
643 * @remarks Making operand order ASSUMPTIONS.
644 */
645IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
646{
647 iemAImpl_cmp_u8, NULL,
648 iemAImpl_cmp_u16, NULL,
649 iemAImpl_cmp_u32, NULL,
650 iemAImpl_cmp_u64, NULL
651};
652
653/** Function table for the TEST instruction.
654 * @remarks Making operand order ASSUMPTIONS.
655 */
656IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
657{
658 iemAImpl_test_u8, NULL,
659 iemAImpl_test_u16, NULL,
660 iemAImpl_test_u32, NULL,
661 iemAImpl_test_u64, NULL
662};
663
664/** Function table for the BT instruction. */
665IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
666{
667 NULL, NULL,
668 iemAImpl_bt_u16, NULL,
669 iemAImpl_bt_u32, NULL,
670 iemAImpl_bt_u64, NULL
671};
672
673/** Function table for the BTC instruction. */
674IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
675{
676 NULL, NULL,
677 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
678 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
679 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
680};
681
682/** Function table for the BTR instruction. */
683IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
684{
685 NULL, NULL,
686 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
687 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
688 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
689};
690
691/** Function table for the BTS instruction. */
692IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
693{
694 NULL, NULL,
695 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
696 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
697 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
698};
699
700/** Function table for the BSF instruction. */
701IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
702{
703 NULL, NULL,
704 iemAImpl_bsf_u16, NULL,
705 iemAImpl_bsf_u32, NULL,
706 iemAImpl_bsf_u64, NULL
707};
708
709/** Function table for the BSR instruction. */
710IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
711{
712 NULL, NULL,
713 iemAImpl_bsr_u16, NULL,
714 iemAImpl_bsr_u32, NULL,
715 iemAImpl_bsr_u64, NULL
716};
717
718/** Function table for the IMUL instruction. */
719IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
720{
721 NULL, NULL,
722 iemAImpl_imul_two_u16, NULL,
723 iemAImpl_imul_two_u32, NULL,
724 iemAImpl_imul_two_u64, NULL
725};
726
727/** Group 1 /r lookup table. */
728IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
729{
730 &g_iemAImpl_add,
731 &g_iemAImpl_or,
732 &g_iemAImpl_adc,
733 &g_iemAImpl_sbb,
734 &g_iemAImpl_and,
735 &g_iemAImpl_sub,
736 &g_iemAImpl_xor,
737 &g_iemAImpl_cmp
738};
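
/*
 * Lookup sketch for the group 1 table above (illustrative; simplified from the
 * decoder's ModRM handling): the reg field of the ModRM byte selects the
 * implementation, so /0 is ADD, /1 is OR, ..., /7 is CMP:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */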
739
740/** Function table for the INC instruction. */
741IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
742{
743 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
744 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
745 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
746 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
747};
748
749/** Function table for the DEC instruction. */
750IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
751{
752 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
753 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
754 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
755 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
756};
757
758/** Function table for the NEG instruction. */
759IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
760{
761 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
762 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
763 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
764 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
765};
766
767/** Function table for the NOT instruction. */
768IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
769{
770 iemAImpl_not_u8, iemAImpl_not_u8_locked,
771 iemAImpl_not_u16, iemAImpl_not_u16_locked,
772 iemAImpl_not_u32, iemAImpl_not_u32_locked,
773 iemAImpl_not_u64, iemAImpl_not_u64_locked
774};
775
776
777/** Function table for the ROL instruction. */
778IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
779{
780 iemAImpl_rol_u8,
781 iemAImpl_rol_u16,
782 iemAImpl_rol_u32,
783 iemAImpl_rol_u64
784};
785
786/** Function table for the ROR instruction. */
787IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
788{
789 iemAImpl_ror_u8,
790 iemAImpl_ror_u16,
791 iemAImpl_ror_u32,
792 iemAImpl_ror_u64
793};
794
795/** Function table for the RCL instruction. */
796IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
797{
798 iemAImpl_rcl_u8,
799 iemAImpl_rcl_u16,
800 iemAImpl_rcl_u32,
801 iemAImpl_rcl_u64
802};
803
804/** Function table for the RCR instruction. */
805IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
806{
807 iemAImpl_rcr_u8,
808 iemAImpl_rcr_u16,
809 iemAImpl_rcr_u32,
810 iemAImpl_rcr_u64
811};
812
813/** Function table for the SHL instruction. */
814IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
815{
816 iemAImpl_shl_u8,
817 iemAImpl_shl_u16,
818 iemAImpl_shl_u32,
819 iemAImpl_shl_u64
820};
821
822/** Function table for the SHR instruction. */
823IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
824{
825 iemAImpl_shr_u8,
826 iemAImpl_shr_u16,
827 iemAImpl_shr_u32,
828 iemAImpl_shr_u64
829};
830
831/** Function table for the SAR instruction. */
832IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
833{
834 iemAImpl_sar_u8,
835 iemAImpl_sar_u16,
836 iemAImpl_sar_u32,
837 iemAImpl_sar_u64
838};
839
840
841/** Function table for the MUL instruction. */
842IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
843{
844 iemAImpl_mul_u8,
845 iemAImpl_mul_u16,
846 iemAImpl_mul_u32,
847 iemAImpl_mul_u64
848};
849
850/** Function table for the IMUL instruction working implicitly on rAX. */
851IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
852{
853 iemAImpl_imul_u8,
854 iemAImpl_imul_u16,
855 iemAImpl_imul_u32,
856 iemAImpl_imul_u64
857};
858
859/** Function table for the DIV instruction. */
860IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
861{
862 iemAImpl_div_u8,
863 iemAImpl_div_u16,
864 iemAImpl_div_u32,
865 iemAImpl_div_u64
866};
867
868/** Function table for the IDIV instruction. */
869IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
870{
871 iemAImpl_idiv_u8,
872 iemAImpl_idiv_u16,
873 iemAImpl_idiv_u32,
874 iemAImpl_idiv_u64
875};
876
877/** Function table for the SHLD instruction */
878IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
879{
880 iemAImpl_shld_u16,
881 iemAImpl_shld_u32,
882 iemAImpl_shld_u64,
883};
884
885/** Function table for the SHRD instruction */
886IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
887{
888 iemAImpl_shrd_u16,
889 iemAImpl_shrd_u32,
890 iemAImpl_shrd_u64,
891};
892
893
894/** Function table for the PUNPCKLBW instruction */
895IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
896/** Function table for the PUNPCKLWD instruction */
897IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
898/** Function table for the PUNPCKLDQ instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
900/** Function table for the PUNPCKLQDQ instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
902
903/** Function table for the PUNPCKHBW instruction */
904IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
905/** Function table for the PUNPCKHWD instruction */
906IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
907/** Function table for the PUNPCKHDQ instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
909/** Function table for the PUNPCKHQDQ instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
911
912/** Function table for the PXOR instruction */
913IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
914/** Function table for the PCMPEQB instruction */
915IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
916/** Function table for the PCMPEQW instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
918/** Function table for the PCMPEQD instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
920
921
922#if defined(IEM_LOG_MEMORY_WRITES)
923/** What IEM just wrote. */
924uint8_t g_abIemWrote[256];
925/** How much IEM just wrote. */
926size_t g_cbIemWrote;
927#endif
928
929
930/*********************************************************************************************************************************
931* Internal Functions *
932*********************************************************************************************************************************/
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
935IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
937/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
938IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
939IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
941IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
944IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
946IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
947IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
948IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
950#ifdef IEM_WITH_SETJMP
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
956#endif
957
958IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
959IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
970IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
972IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
973IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
974IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
975
976#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
979IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
987IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
988IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
989#endif
990
991#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
992IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
993IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
994#endif
995
996
997/**
998 * Sets the pass up status.
999 *
1000 * @returns VINF_SUCCESS.
1001 * @param pVCpu The cross context virtual CPU structure of the
1002 * calling thread.
1003 * @param rcPassUp The pass up status. Must be informational.
1004 * VINF_SUCCESS is not allowed.
1005 */
1006IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1007{
1008 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1009
1010 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1011 if (rcOldPassUp == VINF_SUCCESS)
1012 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1013 /* If both are EM scheduling codes, use EM priority rules. */
1014 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1015 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1016 {
1017 if (rcPassUp < rcOldPassUp)
1018 {
1019 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1020 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1021 }
1022 else
1023 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1024 }
1025 /* Override EM scheduling with specific status code. */
1026 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1027 {
1028 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1029 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1030 }
1031 /* Don't override specific status code, first come first served. */
1032 else
1033 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1034 return VINF_SUCCESS;
1035}
1036
1037
1038/**
1039 * Calculates the CPU mode.
1040 *
1041 * This is mainly for updating IEMCPU::enmCpuMode.
1042 *
1043 * @returns CPU mode.
1044 * @param pVCpu The cross context virtual CPU structure of the
1045 * calling thread.
1046 */
1047DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1048{
1049 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1050 return IEMMODE_64BIT;
1051 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1052 return IEMMODE_32BIT;
1053 return IEMMODE_16BIT;
1054}
1055
1056
1057/**
1058 * Initializes the execution state.
1059 *
1060 * @param pVCpu The cross context virtual CPU structure of the
1061 * calling thread.
1062 * @param fBypassHandlers Whether to bypass access handlers.
1063 *
1064 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1065 * side-effects in strict builds.
1066 */
1067DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1068{
1069 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1070 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1071
1072#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1081#endif
1082
1083#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1084 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1085#endif
1086 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1087 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1088#ifdef VBOX_STRICT
1089 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1090 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1091 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1092 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1093 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1094 pVCpu->iem.s.uRexReg = 127;
1095 pVCpu->iem.s.uRexB = 127;
1096 pVCpu->iem.s.offModRm = 127;
1097 pVCpu->iem.s.uRexIndex = 127;
1098 pVCpu->iem.s.iEffSeg = 127;
1099 pVCpu->iem.s.idxPrefix = 127;
1100 pVCpu->iem.s.uVex3rdReg = 127;
1101 pVCpu->iem.s.uVexLength = 127;
1102 pVCpu->iem.s.fEvexStuff = 127;
1103 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1104# ifdef IEM_WITH_CODE_TLB
1105 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1106 pVCpu->iem.s.pbInstrBuf = NULL;
1107 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1108 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1109 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1110 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1111# else
1112 pVCpu->iem.s.offOpcode = 127;
1113 pVCpu->iem.s.cbOpcode = 127;
1114# endif
1115#endif
1116
1117 pVCpu->iem.s.cActiveMappings = 0;
1118 pVCpu->iem.s.iNextMapping = 0;
1119 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1120 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1121#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1122 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1123 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1124 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1125 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1126 if (!pVCpu->iem.s.fInPatchCode)
1127 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1128#endif
1129}
1130
1131#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1132/**
1133 * Performs a minimal reinitialization of the execution state.
1134 *
1135 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1136 * 'world-switch' type operations on the CPU. Currently only nested
1137 * hardware-virtualization uses it.
1138 *
1139 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1140 */
1141IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1142{
1143 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1144 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1145
1146 pVCpu->iem.s.uCpl = uCpl;
1147 pVCpu->iem.s.enmCpuMode = enmMode;
1148 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffAddrMode = enmMode;
1150 if (enmMode != IEMMODE_64BIT)
1151 {
1152 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1153 pVCpu->iem.s.enmEffOpSize = enmMode;
1154 }
1155 else
1156 {
1157 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1158 pVCpu->iem.s.enmEffOpSize = enmMode;
1159 }
1160 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1161#ifndef IEM_WITH_CODE_TLB
1162 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1163 pVCpu->iem.s.offOpcode = 0;
1164 pVCpu->iem.s.cbOpcode = 0;
1165#endif
1166 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1167}
1168#endif
1169
1170/**
1171 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1172 *
1173 * @param pVCpu The cross context virtual CPU structure of the
1174 * calling thread.
1175 */
1176DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1177{
1178 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1179#ifdef VBOX_STRICT
1180# ifdef IEM_WITH_CODE_TLB
1181 NOREF(pVCpu);
1182# else
1183 pVCpu->iem.s.cbOpcode = 0;
1184# endif
1185#else
1186 NOREF(pVCpu);
1187#endif
1188}
1189
1190
1191/**
1192 * Initializes the decoder state.
1193 *
1194 * iemReInitDecoder is mostly a copy of this function.
1195 *
1196 * @param pVCpu The cross context virtual CPU structure of the
1197 * calling thread.
1198 * @param fBypassHandlers Whether to bypass access handlers.
1199 */
1200DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1201{
1202 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1203 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1204
1205#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1214#endif
1215
1216#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1217 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1218#endif
1219 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1220 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1221 pVCpu->iem.s.enmCpuMode = enmMode;
1222 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1223 pVCpu->iem.s.enmEffAddrMode = enmMode;
1224 if (enmMode != IEMMODE_64BIT)
1225 {
1226 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1227 pVCpu->iem.s.enmEffOpSize = enmMode;
1228 }
1229 else
1230 {
1231 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1232 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1233 }
1234 pVCpu->iem.s.fPrefixes = 0;
1235 pVCpu->iem.s.uRexReg = 0;
1236 pVCpu->iem.s.uRexB = 0;
1237 pVCpu->iem.s.uRexIndex = 0;
1238 pVCpu->iem.s.idxPrefix = 0;
1239 pVCpu->iem.s.uVex3rdReg = 0;
1240 pVCpu->iem.s.uVexLength = 0;
1241 pVCpu->iem.s.fEvexStuff = 0;
1242 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1243#ifdef IEM_WITH_CODE_TLB
1244 pVCpu->iem.s.pbInstrBuf = NULL;
1245 pVCpu->iem.s.offInstrNextByte = 0;
1246 pVCpu->iem.s.offCurInstrStart = 0;
1247# ifdef VBOX_STRICT
1248 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1249 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1250 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1251# endif
1252#else
1253 pVCpu->iem.s.offOpcode = 0;
1254 pVCpu->iem.s.cbOpcode = 0;
1255#endif
1256 pVCpu->iem.s.offModRm = 0;
1257 pVCpu->iem.s.cActiveMappings = 0;
1258 pVCpu->iem.s.iNextMapping = 0;
1259 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1260 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1261#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1262 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1263 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1264 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1265 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1266 if (!pVCpu->iem.s.fInPatchCode)
1267 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1268#endif
1269
1270#ifdef DBGFTRACE_ENABLED
1271 switch (enmMode)
1272 {
1273 case IEMMODE_64BIT:
1274 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1275 break;
1276 case IEMMODE_32BIT:
1277 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1278 break;
1279 case IEMMODE_16BIT:
1280 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1281 break;
1282 }
1283#endif
1284}
1285
1286
1287/**
1288 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1289 *
1290 * This is mostly a copy of iemInitDecoder.
1291 *
1292 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1293 */
1294DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1295{
1296 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1297
1298#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1307#endif
1308
1309 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1310 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1311 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1312 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1313 pVCpu->iem.s.enmEffAddrMode = enmMode;
1314 if (enmMode != IEMMODE_64BIT)
1315 {
1316 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1317 pVCpu->iem.s.enmEffOpSize = enmMode;
1318 }
1319 else
1320 {
1321 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1322 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1323 }
1324 pVCpu->iem.s.fPrefixes = 0;
1325 pVCpu->iem.s.uRexReg = 0;
1326 pVCpu->iem.s.uRexB = 0;
1327 pVCpu->iem.s.uRexIndex = 0;
1328 pVCpu->iem.s.idxPrefix = 0;
1329 pVCpu->iem.s.uVex3rdReg = 0;
1330 pVCpu->iem.s.uVexLength = 0;
1331 pVCpu->iem.s.fEvexStuff = 0;
1332 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1333#ifdef IEM_WITH_CODE_TLB
1334 if (pVCpu->iem.s.pbInstrBuf)
1335 {
1336 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1337 - pVCpu->iem.s.uInstrBufPc;
1338 if (off < pVCpu->iem.s.cbInstrBufTotal)
1339 {
1340 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1341 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1342 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1343 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1344 else
1345 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1346 }
1347 else
1348 {
1349 pVCpu->iem.s.pbInstrBuf = NULL;
1350 pVCpu->iem.s.offInstrNextByte = 0;
1351 pVCpu->iem.s.offCurInstrStart = 0;
1352 pVCpu->iem.s.cbInstrBuf = 0;
1353 pVCpu->iem.s.cbInstrBufTotal = 0;
1354 }
1355 }
1356 else
1357 {
1358 pVCpu->iem.s.offInstrNextByte = 0;
1359 pVCpu->iem.s.offCurInstrStart = 0;
1360 pVCpu->iem.s.cbInstrBuf = 0;
1361 pVCpu->iem.s.cbInstrBufTotal = 0;
1362 }
1363#else
1364 pVCpu->iem.s.cbOpcode = 0;
1365 pVCpu->iem.s.offOpcode = 0;
1366#endif
1367 pVCpu->iem.s.offModRm = 0;
1368 Assert(pVCpu->iem.s.cActiveMappings == 0);
1369 pVCpu->iem.s.iNextMapping = 0;
1370 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1371 Assert(pVCpu->iem.s.fBypassHandlers == false);
1372#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1373 if (!pVCpu->iem.s.fInPatchCode)
1374 { /* likely */ }
1375 else
1376 {
1377 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1378 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1379 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1380 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1381 if (!pVCpu->iem.s.fInPatchCode)
1382 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1383 }
1384#endif
1385
1386#ifdef DBGFTRACE_ENABLED
1387 switch (enmMode)
1388 {
1389 case IEMMODE_64BIT:
1390 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1391 break;
1392 case IEMMODE_32BIT:
1393 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1394 break;
1395 case IEMMODE_16BIT:
1396 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1397 break;
1398 }
1399#endif
1400}
1401
1402
1403
1404/**
1405 * Prefetches opcodes the first time, i.e. when starting execution.
1406 *
1407 * @returns Strict VBox status code.
1408 * @param pVCpu The cross context virtual CPU structure of the
1409 * calling thread.
1410 * @param fBypassHandlers Whether to bypass access handlers.
1411 */
1412IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1413{
1414 iemInitDecoder(pVCpu, fBypassHandlers);
1415
1416#ifdef IEM_WITH_CODE_TLB
1417 /** @todo Do ITLB lookup here. */
1418
1419#else /* !IEM_WITH_CODE_TLB */
1420
1421 /*
1422 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1423 *
1424 * First translate CS:rIP to a physical address.
1425 */
1426 uint32_t cbToTryRead;
1427 RTGCPTR GCPtrPC;
1428 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1429 {
1430 cbToTryRead = PAGE_SIZE;
1431 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1432 if (IEM_IS_CANONICAL(GCPtrPC))
1433 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1434 else
1435 return iemRaiseGeneralProtectionFault0(pVCpu);
1436 }
1437 else
1438 {
1439 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1440 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1441 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1442 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1443 else
1444 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1445 if (cbToTryRead) { /* likely */ }
1446 else /* overflowed */
1447 {
1448 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1449 cbToTryRead = UINT32_MAX;
1450 }
1451 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1452 Assert(GCPtrPC <= UINT32_MAX);
1453 }
1454
1455# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1456 /* Allow interpretation of patch manager code blocks since they can for
1457 instance throw #PFs for perfectly good reasons. */
1458 if (pVCpu->iem.s.fInPatchCode)
1459 {
1460 size_t cbRead = 0;
1461 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1462 AssertRCReturn(rc, rc);
1463 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1464 return VINF_SUCCESS;
1465 }
1466# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1467
1468 RTGCPHYS GCPhys;
1469 uint64_t fFlags;
1470 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1471 if (RT_SUCCESS(rc)) { /* probable */ }
1472 else
1473 {
1474 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1475 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1476 }
1477 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1478 else
1479 {
1480 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1481 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1482 }
1483 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1484 else
1485 {
1486 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1487 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1488 }
1489 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1490 /** @todo Check reserved bits and such stuff. PGM is better at doing
1491 * that, so do it when implementing the guest virtual address
1492 * TLB... */
1493
1494 /*
1495 * Read the bytes at this address.
1496 */
1497 PVM pVM = pVCpu->CTX_SUFF(pVM);
1498# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1499 size_t cbActual;
1500 if ( PATMIsEnabled(pVM)
1501 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1502 {
1503 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1504 Assert(cbActual > 0);
1505 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1506 }
1507 else
1508# endif
1509 {
1510 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1511 if (cbToTryRead > cbLeftOnPage)
1512 cbToTryRead = cbLeftOnPage;
1513 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1514 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1515
1516 if (!pVCpu->iem.s.fBypassHandlers)
1517 {
1518 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1519 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1520 { /* likely */ }
1521 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1522 {
1523 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1524                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1525 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1526 }
1527 else
1528 {
1529 Log((RT_SUCCESS(rcStrict)
1530 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1531 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1532                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1533 return rcStrict;
1534 }
1535 }
1536 else
1537 {
1538 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1539 if (RT_SUCCESS(rc))
1540 { /* likely */ }
1541 else
1542 {
1543 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1544                 GCPtrPC, GCPhys, cbToTryRead, rc));
1545 return rc;
1546 }
1547 }
1548 pVCpu->iem.s.cbOpcode = cbToTryRead;
1549 }
1550#endif /* !IEM_WITH_CODE_TLB */
1551 return VINF_SUCCESS;
1552}
1553
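/*
 * Illustrative sketch (not part of the build): how the prefetch size above is
 * bounded in 16/32-bit mode. The local names (uCsLimit, uEip, cbToTry) are
 * example names only; the math mirrors the checks in
 * iemInitDecoderAndPrefetchOpcodes.
 *
 *     uint32_t cbToTry = uCsLimit - uEip + 1;       // bytes left in CS
 *     if (!cbToTry)                                 // uEip == 0 && limit == 0xffffffff
 *         cbToTry = UINT32_MAX;                     // flat segment wrapped the add
 *     cbToTry = RT_MIN(cbToTry, PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK));
 *     cbToTry = RT_MIN(cbToTry, sizeof(pVCpu->iem.s.abOpcode)); // never overflow the opcode buffer
 */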
1554
1555/**
1556 * Invalidates the IEM TLBs.
1557 *
1558 * This is called internally as well as by PGM when moving GC mappings.
1559 *
1561 * @param pVCpu The cross context virtual CPU structure of the calling
1562 * thread.
1563 * @param fVmm Set when PGM calls us with a remapping.
1564 */
1565VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1566{
1567#ifdef IEM_WITH_CODE_TLB
1568 pVCpu->iem.s.cbInstrBufTotal = 0;
1569 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1570 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1571 { /* very likely */ }
1572 else
1573 {
1574 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1575 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1576 while (i-- > 0)
1577 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1578 }
1579#endif
1580
1581#ifdef IEM_WITH_DATA_TLB
1582 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1583 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1584 { /* very likely */ }
1585 else
1586 {
1587 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1588 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1589 while (i-- > 0)
1590 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1591 }
1592#endif
1593 NOREF(pVCpu); NOREF(fVmm);
1594}
1595
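/*
 * Illustrative sketch (not part of the build): why bumping uTlbRevision above
 * invalidates every entry without touching the array. A lookup forms its tag
 * by OR'ing the current revision into the page number, so entries tagged with
 * an older revision simply never compare equal:
 *
 *     uint64_t const uTag  = (GCPtrPage >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pTlb->aEntries[(uint8_t)uTag];  // 256-entry, direct mapped
 *     bool const     fHit  = pTlbe->uTag == uTag;             // stale revision => miss
 *
 * Only when the revision counter itself wraps to zero do the tags get
 * scrubbed, as done in the unlikely branch above.
 */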
1596
1597/**
1598 * Invalidates a page in the TLBs.
1599 *
1600 * @param pVCpu The cross context virtual CPU structure of the calling
1601 * thread.
1602 * @param GCPtr The address of the page to invalidate
1603 */
1604VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1605{
1606#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1607 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1608 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1609 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1610 uintptr_t idx = (uint8_t)GCPtr;
1611
1612# ifdef IEM_WITH_CODE_TLB
1613 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1614 {
1615 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1616 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1617 pVCpu->iem.s.cbInstrBufTotal = 0;
1618 }
1619# endif
1620
1621# ifdef IEM_WITH_DATA_TLB
1622 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1623 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1624# endif
1625#else
1626 NOREF(pVCpu); NOREF(GCPtr);
1627#endif
1628}
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVCpu The cross context virtual CPU structure of the calling
1637 * thread.
1638 */
1639VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1640{
1641#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1642    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1643
1644# ifdef IEM_WITH_CODE_TLB
1645 pVCpu->iem.s.cbInstrBufTotal = 0;
1646# endif
1647 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1648 if (uTlbPhysRev != 0)
1649 {
1650 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1651 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1652 }
1653 else
1654 {
1655 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1656 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1657
1658 unsigned i;
1659# ifdef IEM_WITH_CODE_TLB
1660 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1661 while (i-- > 0)
1662 {
1663 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1664 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1665 }
1666# endif
1667# ifdef IEM_WITH_DATA_TLB
1668 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1669 while (i-- > 0)
1670 {
1671 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1672 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1673 }
1674# endif
1675 }
1676#else
1677 NOREF(pVCpu);
1678#endif
1679}
1680
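/*
 * Illustrative sketch (not part of the build): how the physical revision set
 * above is consumed by the fetch code further down. The revision lives in the
 * same field as the per-page flags, so a single compare covers both "mapping
 * still valid" and "no special access handling needed":
 *
 *     if (   (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
 *         == pTlb->uTlbPhysRev)
 *     {
 *         // pbMappingR3 may be dereferenced directly
 *     }
 *
 * Bumping uTlbPhysRev therefore forces every entry back through
 * PGMPhysIemGCPhys2PtrNoLock before its ring-3 mapping is trusted again.
 */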
1681
1682/**
1683 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1684 *
1685 * This is called internally as well as by PGM when moving GC mappings.
1686 *
1687 * @param pVM The cross context VM structure.
1688 *
1689 * @remarks Caller holds the PGM lock.
1690 */
1691VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1692{
1693 RT_NOREF_PV(pVM);
1694}
1695
1696#ifdef IEM_WITH_CODE_TLB
1697
1698/**
1699 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1700 * and longjmp'ing on failure.
1701 *
1702 * We end up here for a number of reasons:
1703 * - pbInstrBuf isn't yet initialized.
1704 *  - Advancing beyond the buffer boundary (e.g. cross page).
1705 * - Advancing beyond the CS segment limit.
1706 * - Fetching from non-mappable page (e.g. MMIO).
1707 *
1708 * @param pVCpu The cross context virtual CPU structure of the
1709 * calling thread.
1710 * @param pvDst Where to return the bytes.
1711 * @param cbDst Number of bytes to read.
1712 *
1713 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1714 */
1715IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1716{
1717#ifdef IN_RING3
1718 for (;;)
1719 {
1720 Assert(cbDst <= 8);
1721 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1722
1723 /*
1724 * We might have a partial buffer match, deal with that first to make the
1725 * rest simpler. This is the first part of the cross page/buffer case.
1726 */
1727 if (pVCpu->iem.s.pbInstrBuf != NULL)
1728 {
1729 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1730 {
1731 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1732 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1733 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1734
1735 cbDst -= cbCopy;
1736 pvDst = (uint8_t *)pvDst + cbCopy;
1737 offBuf += cbCopy;
1738                pVCpu->iem.s.offInstrNextByte = offBuf;
1739 }
1740 }
1741
1742 /*
1743 * Check segment limit, figuring how much we're allowed to access at this point.
1744 *
1745 * We will fault immediately if RIP is past the segment limit / in non-canonical
1746 * territory. If we do continue, there are one or more bytes to read before we
1747 * end up in trouble and we need to do that first before faulting.
1748 */
1749 RTGCPTR GCPtrFirst;
1750 uint32_t cbMaxRead;
1751 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1752 {
1753 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1754 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1755 { /* likely */ }
1756 else
1757 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1758 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1759 }
1760 else
1761 {
1762 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1763 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1764 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1765 { /* likely */ }
1766 else
1767 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1768 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1769 if (cbMaxRead != 0)
1770 { /* likely */ }
1771 else
1772 {
1773 /* Overflowed because address is 0 and limit is max. */
1774 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1775 cbMaxRead = X86_PAGE_SIZE;
1776 }
1777 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1778 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1779 if (cbMaxRead2 < cbMaxRead)
1780 cbMaxRead = cbMaxRead2;
1781 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1782 }
1783
1784 /*
1785 * Get the TLB entry for this piece of code.
1786 */
1787 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1788 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1789 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1790 if (pTlbe->uTag == uTag)
1791 {
1792 /* likely when executing lots of code, otherwise unlikely */
1793# ifdef VBOX_WITH_STATISTICS
1794 pVCpu->iem.s.CodeTlb.cTlbHits++;
1795# endif
1796 }
1797 else
1798 {
1799 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1800# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1801 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1802 {
1803 pTlbe->uTag = uTag;
1804 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1805 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1806 pTlbe->GCPhys = NIL_RTGCPHYS;
1807 pTlbe->pbMappingR3 = NULL;
1808 }
1809 else
1810# endif
1811 {
1812 RTGCPHYS GCPhys;
1813 uint64_t fFlags;
1814 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1815 if (RT_FAILURE(rc))
1816 {
1817                    Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1818 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1819 }
1820
1821 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1822 pTlbe->uTag = uTag;
1823 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1824 pTlbe->GCPhys = GCPhys;
1825 pTlbe->pbMappingR3 = NULL;
1826 }
1827 }
1828
1829 /*
1830 * Check TLB page table level access flags.
1831 */
1832 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1833 {
1834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1835 {
1836 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1838 }
1839 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1840 {
1841                Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1842 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1843 }
1844 }
1845
1846# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1847 /*
1848 * Allow interpretation of patch manager code blocks since they can for
1849 * instance throw #PFs for perfectly good reasons.
1850 */
1851 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1852        { /* likely */ }
1853 else
1854 {
1855            /** @todo This could be optimized a little in ring-3 if we liked. */
1856 size_t cbRead = 0;
1857 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1858 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1859 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1860 return;
1861 }
1862# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1863
1864 /*
1865 * Look up the physical page info if necessary.
1866 */
1867 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1868 { /* not necessary */ }
1869 else
1870 {
1871 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1872 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1873 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1874 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1875 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1876 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1877 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1878 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1879 }
1880
1881# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1882 /*
1883 * Try do a direct read using the pbMappingR3 pointer.
1884 */
1885 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1886 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1887 {
1888 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1889 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1890 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1891 {
1892 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1893 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1894 }
1895 else
1896 {
1897 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1898 Assert(cbInstr < cbMaxRead);
1899 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1900 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1901 }
1902 if (cbDst <= cbMaxRead)
1903 {
1904 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1905 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1906 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1907 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1908 return;
1909 }
1910 pVCpu->iem.s.pbInstrBuf = NULL;
1911
1912 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1913 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1914 }
1915 else
1916# endif
1917#if 0
1918 /*
1919     * If there is no special read handling, we can read a bit more and
1920 * put it in the prefetch buffer.
1921 */
1922 if ( cbDst < cbMaxRead
1923 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1924 {
1925 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1926 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1927 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1928 { /* likely */ }
1929 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1930 {
1931 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1932 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1933 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1934            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1935 }
1936 else
1937 {
1938 Log((RT_SUCCESS(rcStrict)
1939 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1940 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1941 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1942 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1943 }
1944 }
1945 /*
1946 * Special read handling, so only read exactly what's needed.
1947 * This is a highly unlikely scenario.
1948 */
1949 else
1950#endif
1951 {
1952 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1953 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1954 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1955 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1956 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1957 { /* likely */ }
1958 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1959 {
1960            Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1961                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1962 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1963 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1964 }
1965 else
1966 {
1967            Log((RT_SUCCESS(rcStrict)
1968                 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1969                 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1970                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1971 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1972 }
1973 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1974 if (cbToRead == cbDst)
1975 return;
1976 }
1977
1978 /*
1979 * More to read, loop.
1980 */
1981 cbDst -= cbMaxRead;
1982 pvDst = (uint8_t *)pvDst + cbMaxRead;
1983 }
1984#else
1985 RT_NOREF(pvDst, cbDst);
1986 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1987#endif
1988}
1989
1990#else
1991
1992/**
1993 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1994 * appropriate exception if it fails.
1995 *
1996 * @returns Strict VBox status code.
1997 * @param pVCpu The cross context virtual CPU structure of the
1998 * calling thread.
1999 * @param   cbMin               The minimum number of bytes relative to offOpcode
2000 * that must be read.
2001 */
2002IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2003{
2004 /*
2005 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2006 *
2007 * First translate CS:rIP to a physical address.
2008 */
2009 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2010 uint32_t cbToTryRead;
2011 RTGCPTR GCPtrNext;
2012 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2013 {
2014 cbToTryRead = PAGE_SIZE;
2015 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2016 if (!IEM_IS_CANONICAL(GCPtrNext))
2017 return iemRaiseGeneralProtectionFault0(pVCpu);
2018 }
2019 else
2020 {
2021 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2022 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2023 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2024 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2025 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2026 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2027 if (!cbToTryRead) /* overflowed */
2028 {
2029 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2030 cbToTryRead = UINT32_MAX;
2031 /** @todo check out wrapping around the code segment. */
2032 }
2033 if (cbToTryRead < cbMin - cbLeft)
2034 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2035 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2036 }
2037
2038 /* Only read up to the end of the page, and make sure we don't read more
2039 than the opcode buffer can hold. */
2040 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2041 if (cbToTryRead > cbLeftOnPage)
2042 cbToTryRead = cbLeftOnPage;
2043 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2044 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2045/** @todo r=bird: Convert assertion into undefined opcode exception? */
2046 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2047
2048# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2049 /* Allow interpretation of patch manager code blocks since they can for
2050 instance throw #PFs for perfectly good reasons. */
2051 if (pVCpu->iem.s.fInPatchCode)
2052 {
2053 size_t cbRead = 0;
2054 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2055 AssertRCReturn(rc, rc);
2056 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2057 return VINF_SUCCESS;
2058 }
2059# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2060
2061 RTGCPHYS GCPhys;
2062 uint64_t fFlags;
2063 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2064 if (RT_FAILURE(rc))
2065 {
2066 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2067 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2068 }
2069 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2070 {
2071 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2072 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2073 }
2074 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2075 {
2076 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2077 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2078 }
2079 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2080 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2081 /** @todo Check reserved bits and such stuff. PGM is better at doing
2082 * that, so do it when implementing the guest virtual address
2083 * TLB... */
2084
2085 /*
2086 * Read the bytes at this address.
2087 *
2088 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2089 * and since PATM should only patch the start of an instruction there
2090 * should be no need to check again here.
2091 */
2092 if (!pVCpu->iem.s.fBypassHandlers)
2093 {
2094 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2095 cbToTryRead, PGMACCESSORIGIN_IEM);
2096 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2097 { /* likely */ }
2098 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2099 {
2100 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2101                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2102 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2103 }
2104 else
2105 {
2106 Log((RT_SUCCESS(rcStrict)
2107 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2108 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2109                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2110 return rcStrict;
2111 }
2112 }
2113 else
2114 {
2115 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2116 if (RT_SUCCESS(rc))
2117 { /* likely */ }
2118 else
2119 {
2120 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2121 return rc;
2122 }
2123 }
2124 pVCpu->iem.s.cbOpcode += cbToTryRead;
2125 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2126
2127 return VINF_SUCCESS;
2128}
2129
2130#endif /* !IEM_WITH_CODE_TLB */
2131#ifndef IEM_WITH_SETJMP
2132
2133/**
2134 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2135 *
2136 * @returns Strict VBox status code.
2137 * @param pVCpu The cross context virtual CPU structure of the
2138 * calling thread.
2139 * @param pb Where to return the opcode byte.
2140 */
2141DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2142{
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 {
2146 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2147 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2148 pVCpu->iem.s.offOpcode = offOpcode + 1;
2149 }
2150 else
2151 *pb = 0;
2152 return rcStrict;
2153}
2154
2155
2156/**
2157 * Fetches the next opcode byte.
2158 *
2159 * @returns Strict VBox status code.
2160 * @param pVCpu The cross context virtual CPU structure of the
2161 * calling thread.
2162 * @param pu8 Where to return the opcode byte.
2163 */
2164DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2165{
2166 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2167 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2168 {
2169 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2170 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2171 return VINF_SUCCESS;
2172 }
2173 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2174}
2175
2176#else /* IEM_WITH_SETJMP */
2177
2178/**
2179 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2180 *
2181 * @returns The opcode byte.
2182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2183 */
2184DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2185{
2186# ifdef IEM_WITH_CODE_TLB
2187 uint8_t u8;
2188 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2189 return u8;
2190# else
2191 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2192 if (rcStrict == VINF_SUCCESS)
2193 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2194 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2195# endif
2196}
2197
2198
2199/**
2200 * Fetches the next opcode byte, longjmp on error.
2201 *
2202 * @returns The opcode byte.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 */
2205DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2206{
2207# ifdef IEM_WITH_CODE_TLB
2208 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2209 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2210 if (RT_LIKELY( pbBuf != NULL
2211 && offBuf < pVCpu->iem.s.cbInstrBuf))
2212 {
2213 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2214 return pbBuf[offBuf];
2215 }
2216# else
2217 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2218 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2219 {
2220 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2221 return pVCpu->iem.s.abOpcode[offOpcode];
2222 }
2223# endif
2224 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2225}
2226
2227#endif /* IEM_WITH_SETJMP */
2228
2229/**
2230 * Fetches the next opcode byte, returns automatically on failure.
2231 *
2232 * @param a_pu8 Where to return the opcode byte.
2233 * @remark Implicitly references pVCpu.
2234 */
2235#ifndef IEM_WITH_SETJMP
2236# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2237 do \
2238 { \
2239 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2240 if (rcStrict2 == VINF_SUCCESS) \
2241 { /* likely */ } \
2242 else \
2243 return rcStrict2; \
2244 } while (0)
2245#else
2246# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2247#endif /* IEM_WITH_SETJMP */
2248
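/*
 * Illustrative sketch (not part of the build): typical decoder usage of the
 * fetch macro above. iemOp_ExampleByte is a made-up name for illustration;
 * the real decoder functions are included from the IEMAllInstructions*.cpp.h
 * templates. In the non-setjmp build the macro returns from the caller on
 * failure, so the caller must return VBOXSTRICTRC.
 *
 *     IEM_STATIC VBOXSTRICTRC iemOp_ExampleByte(PVMCPU pVCpu)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm);   // returns / longjmps on fetch failure
 *         // ... use bImm ...
 *         return VINF_SUCCESS;
 *     }
 */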
2249
2250#ifndef IEM_WITH_SETJMP
2251/**
2252 * Fetches the next signed byte from the opcode stream.
2253 *
2254 * @returns Strict VBox status code.
2255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2256 * @param pi8 Where to return the signed byte.
2257 */
2258DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2259{
2260 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2261}
2262#endif /* !IEM_WITH_SETJMP */
2263
2264
2265/**
2266 * Fetches the next signed byte from the opcode stream, returning automatically
2267 * on failure.
2268 *
2269 * @param a_pi8 Where to return the signed byte.
2270 * @remark Implicitly references pVCpu.
2271 */
2272#ifndef IEM_WITH_SETJMP
2273# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2274 do \
2275 { \
2276 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2277 if (rcStrict2 != VINF_SUCCESS) \
2278 return rcStrict2; \
2279 } while (0)
2280#else /* IEM_WITH_SETJMP */
2281# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2282
2283#endif /* IEM_WITH_SETJMP */
2284
2285#ifndef IEM_WITH_SETJMP
2286
2287/**
2288 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2289 *
2290 * @returns Strict VBox status code.
2291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2292 * @param   pu16                Where to return the opcode word.
2293 */
2294DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2295{
2296 uint8_t u8;
2297 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2298 if (rcStrict == VINF_SUCCESS)
2299 *pu16 = (int8_t)u8;
2300 return rcStrict;
2301}
2302
2303
2304/**
2305 * Fetches the next signed byte from the opcode stream, extending it to
2306 * unsigned 16-bit.
2307 *
2308 * @returns Strict VBox status code.
2309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2310 * @param pu16 Where to return the unsigned word.
2311 */
2312DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2313{
2314 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2315 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2316 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2317
2318 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2319 pVCpu->iem.s.offOpcode = offOpcode + 1;
2320 return VINF_SUCCESS;
2321}
2322
2323#endif /* !IEM_WITH_SETJMP */
2324
2325/**
2326 * Fetches the next signed byte from the opcode stream and sign-extends it to
2327 * a word, returning automatically on failure.
2328 *
2329 * @param a_pu16 Where to return the word.
2330 * @remark Implicitly references pVCpu.
2331 */
2332#ifndef IEM_WITH_SETJMP
2333# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2334 do \
2335 { \
2336 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2337 if (rcStrict2 != VINF_SUCCESS) \
2338 return rcStrict2; \
2339 } while (0)
2340#else
2341# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2342#endif
2343
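/*
 * Worked example (informal): sign extension into the unsigned destination.
 * A displacement byte of 0xFE (-2) fetched via IEM_OPCODE_GET_NEXT_S8_SX_U16
 * becomes 0xFFFE, so adding it to a 16-bit base wraps exactly like the CPU's
 * effective address calculation:
 *
 *     uint16_t u16Disp;                                //  *pu16 = (int8_t)0xFE  =>  0xFFFE
 *     // IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Disp);
 *     // (uint16_t)(0x0001 + 0xFFFE) == 0xFFFF
 */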
2344#ifndef IEM_WITH_SETJMP
2345
2346/**
2347 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2348 *
2349 * @returns Strict VBox status code.
2350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2351 * @param pu32 Where to return the opcode dword.
2352 */
2353DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2354{
2355 uint8_t u8;
2356 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2357 if (rcStrict == VINF_SUCCESS)
2358 *pu32 = (int8_t)u8;
2359 return rcStrict;
2360}
2361
2362
2363/**
2364 * Fetches the next signed byte from the opcode stream, extending it to
2365 * unsigned 32-bit.
2366 *
2367 * @returns Strict VBox status code.
2368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2369 * @param pu32 Where to return the unsigned dword.
2370 */
2371DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2372{
2373 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2374 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2375 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2376
2377 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2378 pVCpu->iem.s.offOpcode = offOpcode + 1;
2379 return VINF_SUCCESS;
2380}
2381
2382#endif /* !IEM_WITH_SETJMP */
2383
2384/**
2385 * Fetches the next signed byte from the opcode stream and sign-extends it to
2386 * a double word, returning automatically on failure.
2387 *
2388 * @param   a_pu32              Where to return the double word.
2389 * @remark Implicitly references pVCpu.
2390 */
2391#ifndef IEM_WITH_SETJMP
2392#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2393 do \
2394 { \
2395 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2396 if (rcStrict2 != VINF_SUCCESS) \
2397 return rcStrict2; \
2398 } while (0)
2399#else
2400# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2401#endif
2402
2403#ifndef IEM_WITH_SETJMP
2404
2405/**
2406 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2407 *
2408 * @returns Strict VBox status code.
2409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2410 * @param pu64 Where to return the opcode qword.
2411 */
2412DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2413{
2414 uint8_t u8;
2415 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2416 if (rcStrict == VINF_SUCCESS)
2417 *pu64 = (int8_t)u8;
2418 return rcStrict;
2419}
2420
2421
2422/**
2423 * Fetches the next signed byte from the opcode stream, extending it to
2424 * unsigned 64-bit.
2425 *
2426 * @returns Strict VBox status code.
2427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2428 * @param pu64 Where to return the unsigned qword.
2429 */
2430DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2431{
2432 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2433 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2434 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2435
2436 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2437 pVCpu->iem.s.offOpcode = offOpcode + 1;
2438 return VINF_SUCCESS;
2439}
2440
2441#endif /* !IEM_WITH_SETJMP */
2442
2443
2444/**
2445 * Fetches the next signed byte from the opcode stream and sign-extends it to
2446 * a quad word, returning automatically on failure.
2447 *
2448 * @param   a_pu64              Where to return the quad word.
2449 * @remark Implicitly references pVCpu.
2450 */
2451#ifndef IEM_WITH_SETJMP
2452# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2453 do \
2454 { \
2455 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2456 if (rcStrict2 != VINF_SUCCESS) \
2457 return rcStrict2; \
2458 } while (0)
2459#else
2460# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2461#endif
2462
2463
2464#ifndef IEM_WITH_SETJMP
2465/**
2466 * Fetches the next opcode byte, recording its position as the ModR/M byte offset.
2467 *
2468 * @returns Strict VBox status code.
2469 * @param pVCpu The cross context virtual CPU structure of the
2470 * calling thread.
2471 * @param pu8 Where to return the opcode byte.
2472 */
2473DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2474{
2475 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2476 pVCpu->iem.s.offModRm = offOpcode;
2477 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2478 {
2479 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2480 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2481 return VINF_SUCCESS;
2482 }
2483 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2484}
2485#else /* IEM_WITH_SETJMP */
2486/**
2487 * Fetches the next opcode byte and records the ModR/M byte offset, longjmp on error.
2488 *
2489 * @returns The opcode byte.
2490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2491 */
2492DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2493{
2494# ifdef IEM_WITH_CODE_TLB
2495 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2496 pVCpu->iem.s.offModRm = offBuf;
2497 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2498 if (RT_LIKELY( pbBuf != NULL
2499 && offBuf < pVCpu->iem.s.cbInstrBuf))
2500 {
2501 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2502 return pbBuf[offBuf];
2503 }
2504# else
2505 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2506 pVCpu->iem.s.offModRm = offOpcode;
2507 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2508 {
2509 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2510 return pVCpu->iem.s.abOpcode[offOpcode];
2511 }
2512# endif
2513 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2514}
2515#endif /* IEM_WITH_SETJMP */
2516
2517/**
2518 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2519 * on failure.
2520 *
2521 * Will note down the position of the ModR/M byte for VT-x exits.
2522 *
2523 * @param a_pbRm Where to return the RM opcode byte.
2524 * @remark Implicitly references pVCpu.
2525 */
2526#ifndef IEM_WITH_SETJMP
2527# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2528 do \
2529 { \
2530 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2531 if (rcStrict2 == VINF_SUCCESS) \
2532 { /* likely */ } \
2533 else \
2534 return rcStrict2; \
2535 } while (0)
2536#else
2537# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2538#endif /* IEM_WITH_SETJMP */
2539
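/*
 * Illustrative sketch (not part of the build): how a two-operand decoder
 * would use the ModR/M fetch above. iemOp_ExampleModRm is a made-up name;
 * the point is that IEM_OPCODE_GET_NEXT_RM both returns the byte and records
 * pVCpu->iem.s.offModRm so nested VT-x instruction information can locate it
 * later. The X86_MODRM_* constants are assumed to come from iprt/x86.h.
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);                        // notes pVCpu->iem.s.offModRm
 *     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *     {
 *         // register form: reg/reg operation
 *     }
 *     else
 *     {
 *         // memory form: decode SIB / displacement next
 *     }
 */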
2540
2541#ifndef IEM_WITH_SETJMP
2542
2543/**
2544 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2545 *
2546 * @returns Strict VBox status code.
2547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2548 * @param pu16 Where to return the opcode word.
2549 */
2550DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2551{
2552 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2553 if (rcStrict == VINF_SUCCESS)
2554 {
2555 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2556# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2557 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2558# else
2559 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2560# endif
2561 pVCpu->iem.s.offOpcode = offOpcode + 2;
2562 }
2563 else
2564 *pu16 = 0;
2565 return rcStrict;
2566}
2567
2568
2569/**
2570 * Fetches the next opcode word.
2571 *
2572 * @returns Strict VBox status code.
2573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2574 * @param pu16 Where to return the opcode word.
2575 */
2576DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2577{
2578 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2579 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2580 {
2581 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2582# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2583 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2584# else
2585 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2586# endif
2587 return VINF_SUCCESS;
2588 }
2589 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2590}
2591
2592#else /* IEM_WITH_SETJMP */
2593
2594/**
2595 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2596 *
2597 * @returns The opcode word.
2598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2599 */
2600DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2601{
2602# ifdef IEM_WITH_CODE_TLB
2603 uint16_t u16;
2604 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2605 return u16;
2606# else
2607 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2608 if (rcStrict == VINF_SUCCESS)
2609 {
2610 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2611 pVCpu->iem.s.offOpcode += 2;
2612# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2613 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2614# else
2615 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2616# endif
2617 }
2618 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2619# endif
2620}
2621
2622
2623/**
2624 * Fetches the next opcode word, longjmp on error.
2625 *
2626 * @returns The opcode word.
2627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2628 */
2629DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2630{
2631# ifdef IEM_WITH_CODE_TLB
2632 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2633 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2634 if (RT_LIKELY( pbBuf != NULL
2635 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2636 {
2637 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2638# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2639 return *(uint16_t const *)&pbBuf[offBuf];
2640# else
2641 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2642# endif
2643 }
2644# else
2645 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2646 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2647 {
2648 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2649# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2650 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2651# else
2652 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2653# endif
2654 }
2655# endif
2656 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2657}
2658
2659#endif /* IEM_WITH_SETJMP */
2660
2661
2662/**
2663 * Fetches the next opcode word, returns automatically on failure.
2664 *
2665 * @param a_pu16 Where to return the opcode word.
2666 * @remark Implicitly references pVCpu.
2667 */
2668#ifndef IEM_WITH_SETJMP
2669# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2670 do \
2671 { \
2672 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2673 if (rcStrict2 != VINF_SUCCESS) \
2674 return rcStrict2; \
2675 } while (0)
2676#else
2677# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2678#endif
2679
2680#ifndef IEM_WITH_SETJMP
2681
2682/**
2683 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2684 *
2685 * @returns Strict VBox status code.
2686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2687 * @param pu32 Where to return the opcode double word.
2688 */
2689DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2690{
2691 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2692 if (rcStrict == VINF_SUCCESS)
2693 {
2694 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2695 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2696 pVCpu->iem.s.offOpcode = offOpcode + 2;
2697 }
2698 else
2699 *pu32 = 0;
2700 return rcStrict;
2701}
2702
2703
2704/**
2705 * Fetches the next opcode word, zero extending it to a double word.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pu32 Where to return the opcode double word.
2710 */
2711DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2712{
2713 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2714 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2715 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2716
2717 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2718 pVCpu->iem.s.offOpcode = offOpcode + 2;
2719 return VINF_SUCCESS;
2720}
2721
2722#endif /* !IEM_WITH_SETJMP */
2723
2724
2725/**
2726 * Fetches the next opcode word and zero extends it to a double word, returns
2727 * automatically on failure.
2728 *
2729 * @param a_pu32 Where to return the opcode double word.
2730 * @remark Implicitly references pVCpu.
2731 */
2732#ifndef IEM_WITH_SETJMP
2733# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2734 do \
2735 { \
2736 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2737 if (rcStrict2 != VINF_SUCCESS) \
2738 return rcStrict2; \
2739 } while (0)
2740#else
2741# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2742#endif
2743
2744#ifndef IEM_WITH_SETJMP
2745
2746/**
2747 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2748 *
2749 * @returns Strict VBox status code.
2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2751 * @param pu64 Where to return the opcode quad word.
2752 */
2753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2754{
2755 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2759 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2760 pVCpu->iem.s.offOpcode = offOpcode + 2;
2761 }
2762 else
2763 *pu64 = 0;
2764 return rcStrict;
2765}
2766
2767
2768/**
2769 * Fetches the next opcode word, zero extending it to a quad word.
2770 *
2771 * @returns Strict VBox status code.
2772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2773 * @param pu64 Where to return the opcode quad word.
2774 */
2775DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2776{
2777 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2778 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2779 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2780
2781 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2782 pVCpu->iem.s.offOpcode = offOpcode + 2;
2783 return VINF_SUCCESS;
2784}
2785
2786#endif /* !IEM_WITH_SETJMP */
2787
2788/**
2789 * Fetches the next opcode word and zero extends it to a quad word, returns
2790 * automatically on failure.
2791 *
2792 * @param a_pu64 Where to return the opcode quad word.
2793 * @remark Implicitly references pVCpu.
2794 */
2795#ifndef IEM_WITH_SETJMP
2796# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2797 do \
2798 { \
2799 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2800 if (rcStrict2 != VINF_SUCCESS) \
2801 return rcStrict2; \
2802 } while (0)
2803#else
2804# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2805#endif
2806
2807
2808#ifndef IEM_WITH_SETJMP
2809/**
2810 * Fetches the next signed word from the opcode stream.
2811 *
2812 * @returns Strict VBox status code.
2813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2814 * @param pi16 Where to return the signed word.
2815 */
2816DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2817{
2818 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2819}
2820#endif /* !IEM_WITH_SETJMP */
2821
2822
2823/**
2824 * Fetches the next signed word from the opcode stream, returning automatically
2825 * on failure.
2826 *
2827 * @param a_pi16 Where to return the signed word.
2828 * @remark Implicitly references pVCpu.
2829 */
2830#ifndef IEM_WITH_SETJMP
2831# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2832 do \
2833 { \
2834 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2835 if (rcStrict2 != VINF_SUCCESS) \
2836 return rcStrict2; \
2837 } while (0)
2838#else
2839# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2840#endif
2841
2842#ifndef IEM_WITH_SETJMP
2843
2844/**
2845 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2846 *
2847 * @returns Strict VBox status code.
2848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2849 * @param pu32 Where to return the opcode dword.
2850 */
2851DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2852{
2853 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2854 if (rcStrict == VINF_SUCCESS)
2855 {
2856 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2857# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2858 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2859# else
2860 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2861 pVCpu->iem.s.abOpcode[offOpcode + 1],
2862 pVCpu->iem.s.abOpcode[offOpcode + 2],
2863 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2864# endif
2865 pVCpu->iem.s.offOpcode = offOpcode + 4;
2866 }
2867 else
2868 *pu32 = 0;
2869 return rcStrict;
2870}
2871
2872
2873/**
2874 * Fetches the next opcode dword.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu32 Where to return the opcode double word.
2879 */
2880DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2881{
2882 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2883 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2884 {
2885 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2886# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2887 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2888# else
2889 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2890 pVCpu->iem.s.abOpcode[offOpcode + 1],
2891 pVCpu->iem.s.abOpcode[offOpcode + 2],
2892 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2893# endif
2894 return VINF_SUCCESS;
2895 }
2896 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2897}
2898
2899#else  /* IEM_WITH_SETJMP */
2900
2901/**
2902 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2903 *
2904 * @returns The opcode dword.
2905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2906 */
2907DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2908{
2909# ifdef IEM_WITH_CODE_TLB
2910 uint32_t u32;
2911 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2912 return u32;
2913# else
2914 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2915 if (rcStrict == VINF_SUCCESS)
2916 {
2917 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2918 pVCpu->iem.s.offOpcode = offOpcode + 4;
2919# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2920 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2921# else
2922 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2923 pVCpu->iem.s.abOpcode[offOpcode + 1],
2924 pVCpu->iem.s.abOpcode[offOpcode + 2],
2925 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2926# endif
2927 }
2928 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2929# endif
2930}
2931
2932
2933/**
2934 * Fetches the next opcode dword, longjmp on error.
2935 *
2936 * @returns The opcode dword.
2937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2938 */
2939DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2940{
2941# ifdef IEM_WITH_CODE_TLB
2942 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2943 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2944 if (RT_LIKELY( pbBuf != NULL
2945 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2946 {
2947 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2948# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2949 return *(uint32_t const *)&pbBuf[offBuf];
2950# else
2951 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2952 pbBuf[offBuf + 1],
2953 pbBuf[offBuf + 2],
2954 pbBuf[offBuf + 3]);
2955# endif
2956 }
2957# else
2958 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2959 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2960 {
2961 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2962# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2963 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2964# else
2965 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2966 pVCpu->iem.s.abOpcode[offOpcode + 1],
2967 pVCpu->iem.s.abOpcode[offOpcode + 2],
2968 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2969# endif
2970 }
2971# endif
2972 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2973}
2974
2975#endif /* IEM_WITH_SETJMP */
2976
2977
2978/**
2979 * Fetches the next opcode dword, returns automatically on failure.
2980 *
2981 * @param a_pu32 Where to return the opcode dword.
2982 * @remark Implicitly references pVCpu.
2983 */
2984#ifndef IEM_WITH_SETJMP
2985# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2986 do \
2987 { \
2988 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2989 if (rcStrict2 != VINF_SUCCESS) \
2990 return rcStrict2; \
2991 } while (0)
2992#else
2993# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2994#endif
2995
2996#ifndef IEM_WITH_SETJMP
2997
2998/**
2999 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param   pu64                Where to return the opcode quad word.
3004 */
3005DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3006{
3007 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3008 if (rcStrict == VINF_SUCCESS)
3009 {
3010 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3011 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 pVCpu->iem.s.offOpcode = offOpcode + 4;
3016 }
3017 else
3018 *pu64 = 0;
3019 return rcStrict;
3020}
3021
3022
3023/**
3024 * Fetches the next opcode dword, zero extending it to a quad word.
3025 *
3026 * @returns Strict VBox status code.
3027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3028 * @param pu64 Where to return the opcode quad word.
3029 */
3030DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3031{
3032 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3033 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3034 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3035
3036 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3037 pVCpu->iem.s.abOpcode[offOpcode + 1],
3038 pVCpu->iem.s.abOpcode[offOpcode + 2],
3039 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3040 pVCpu->iem.s.offOpcode = offOpcode + 4;
3041 return VINF_SUCCESS;
3042}
3043
3044#endif /* !IEM_WITH_SETJMP */
3045
3046
3047/**
3048 * Fetches the next opcode dword and zero extends it to a quad word, returns
3049 * automatically on failure.
3050 *
3051 * @param a_pu64 Where to return the opcode quad word.
3052 * @remark Implicitly references pVCpu.
3053 */
3054#ifndef IEM_WITH_SETJMP
3055# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3056 do \
3057 { \
3058 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3059 if (rcStrict2 != VINF_SUCCESS) \
3060 return rcStrict2; \
3061 } while (0)
3062#else
3063# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3064#endif
3065
3066
3067#ifndef IEM_WITH_SETJMP
3068/**
3069 * Fetches the next signed double word from the opcode stream.
3070 *
3071 * @returns Strict VBox status code.
3072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3073 * @param pi32 Where to return the signed double word.
3074 */
3075DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3076{
3077 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3078}
3079#endif
3080
3081/**
3082 * Fetches the next signed double word from the opcode stream, returning
3083 * automatically on failure.
3084 *
3085 * @param a_pi32 Where to return the signed double word.
3086 * @remark Implicitly references pVCpu.
3087 */
3088#ifndef IEM_WITH_SETJMP
3089# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3090 do \
3091 { \
3092 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3093 if (rcStrict2 != VINF_SUCCESS) \
3094 return rcStrict2; \
3095 } while (0)
3096#else
3097# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3098#endif
3099
3100#ifndef IEM_WITH_SETJMP
3101
3102/**
3103 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3104 *
3105 * @returns Strict VBox status code.
3106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3107 * @param pu64 Where to return the opcode qword.
3108 */
3109DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3110{
3111 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3112 if (rcStrict == VINF_SUCCESS)
3113 {
3114 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3115 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3116 pVCpu->iem.s.abOpcode[offOpcode + 1],
3117 pVCpu->iem.s.abOpcode[offOpcode + 2],
3118 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3119 pVCpu->iem.s.offOpcode = offOpcode + 4;
3120 }
3121 else
3122 *pu64 = 0;
3123 return rcStrict;
3124}
3125
3126
3127/**
3128 * Fetches the next opcode dword, sign extending it into a quad word.
3129 *
3130 * @returns Strict VBox status code.
3131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3132 * @param pu64 Where to return the opcode quad word.
3133 */
3134DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3135{
3136 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3137 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3138 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3139
3140 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3141 pVCpu->iem.s.abOpcode[offOpcode + 1],
3142 pVCpu->iem.s.abOpcode[offOpcode + 2],
3143 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3144 *pu64 = i32;
3145 pVCpu->iem.s.offOpcode = offOpcode + 4;
3146 return VINF_SUCCESS;
3147}
3148
3149#endif /* !IEM_WITH_SETJMP */
3150
3151
3152/**
3153 * Fetches the next opcode double word and sign extends it to a quad word,
3154 * returns automatically on failure.
3155 *
3156 * @param a_pu64 Where to return the opcode quad word.
3157 * @remark Implicitly references pVCpu.
3158 */
3159#ifndef IEM_WITH_SETJMP
3160# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3161 do \
3162 { \
3163 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3164 if (rcStrict2 != VINF_SUCCESS) \
3165 return rcStrict2; \
3166 } while (0)
3167#else
3168# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3169#endif
3170
3171#ifndef IEM_WITH_SETJMP
3172
3173/**
3174 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3175 *
3176 * @returns Strict VBox status code.
3177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3178 * @param pu64 Where to return the opcode qword.
3179 */
3180DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3181{
3182 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3183 if (rcStrict == VINF_SUCCESS)
3184 {
3185 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3186# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3187 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3188# else
3189 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3190 pVCpu->iem.s.abOpcode[offOpcode + 1],
3191 pVCpu->iem.s.abOpcode[offOpcode + 2],
3192 pVCpu->iem.s.abOpcode[offOpcode + 3],
3193 pVCpu->iem.s.abOpcode[offOpcode + 4],
3194 pVCpu->iem.s.abOpcode[offOpcode + 5],
3195 pVCpu->iem.s.abOpcode[offOpcode + 6],
3196 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3197# endif
3198 pVCpu->iem.s.offOpcode = offOpcode + 8;
3199 }
3200 else
3201 *pu64 = 0;
3202 return rcStrict;
3203}
3204
3205
3206/**
3207 * Fetches the next opcode qword.
3208 *
3209 * @returns Strict VBox status code.
3210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3211 * @param pu64 Where to return the opcode qword.
3212 */
3213DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3214{
3215 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3216 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3217 {
3218# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3219 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3220# else
3221 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3222 pVCpu->iem.s.abOpcode[offOpcode + 1],
3223 pVCpu->iem.s.abOpcode[offOpcode + 2],
3224 pVCpu->iem.s.abOpcode[offOpcode + 3],
3225 pVCpu->iem.s.abOpcode[offOpcode + 4],
3226 pVCpu->iem.s.abOpcode[offOpcode + 5],
3227 pVCpu->iem.s.abOpcode[offOpcode + 6],
3228 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3229# endif
3230 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3231 return VINF_SUCCESS;
3232 }
3233 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3234}
3235
3236#else /* IEM_WITH_SETJMP */
3237
3238/**
3239 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3240 *
3241 * @returns The opcode qword.
3242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3243 */
3244DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3245{
3246# ifdef IEM_WITH_CODE_TLB
3247 uint64_t u64;
3248 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3249 return u64;
3250# else
3251 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3252 if (rcStrict == VINF_SUCCESS)
3253 {
3254 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3255 pVCpu->iem.s.offOpcode = offOpcode + 8;
3256# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3257 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3258# else
3259 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3260 pVCpu->iem.s.abOpcode[offOpcode + 1],
3261 pVCpu->iem.s.abOpcode[offOpcode + 2],
3262 pVCpu->iem.s.abOpcode[offOpcode + 3],
3263 pVCpu->iem.s.abOpcode[offOpcode + 4],
3264 pVCpu->iem.s.abOpcode[offOpcode + 5],
3265 pVCpu->iem.s.abOpcode[offOpcode + 6],
3266 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3267# endif
3268 }
3269 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3270# endif
3271}
3272
3273
3274/**
3275 * Fetches the next opcode qword, longjmp on error.
3276 *
3277 * @returns The opcode qword.
3278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3279 */
3280DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3281{
3282# ifdef IEM_WITH_CODE_TLB
3283 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3284 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3285 if (RT_LIKELY( pbBuf != NULL
3286 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3287 {
3288 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3289# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3290 return *(uint64_t const *)&pbBuf[offBuf];
3291# else
3292 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3293 pbBuf[offBuf + 1],
3294 pbBuf[offBuf + 2],
3295 pbBuf[offBuf + 3],
3296 pbBuf[offBuf + 4],
3297 pbBuf[offBuf + 5],
3298 pbBuf[offBuf + 6],
3299 pbBuf[offBuf + 7]);
3300# endif
3301 }
3302# else
3303 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3304 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3305 {
3306 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3307# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3308 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3309# else
3310 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3311 pVCpu->iem.s.abOpcode[offOpcode + 1],
3312 pVCpu->iem.s.abOpcode[offOpcode + 2],
3313 pVCpu->iem.s.abOpcode[offOpcode + 3],
3314 pVCpu->iem.s.abOpcode[offOpcode + 4],
3315 pVCpu->iem.s.abOpcode[offOpcode + 5],
3316 pVCpu->iem.s.abOpcode[offOpcode + 6],
3317 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3318# endif
3319 }
3320# endif
3321 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3322}
3323
3324#endif /* IEM_WITH_SETJMP */
3325
3326/**
3327 * Fetches the next opcode quad word, returns automatically on failure.
3328 *
3329 * @param a_pu64 Where to return the opcode quad word.
3330 * @remark Implicitly references pVCpu.
3331 */
3332#ifndef IEM_WITH_SETJMP
3333# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3334 do \
3335 { \
3336 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3337 if (rcStrict2 != VINF_SUCCESS) \
3338 return rcStrict2; \
3339 } while (0)
3340#else
3341# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3342#endif
3343
3344
3345/** @name Misc Worker Functions.
3346 * @{
3347 */
3348
3349/**
3350 * Gets the exception class for the specified exception vector.
3351 *
3352 * @returns The class of the specified exception.
3353 * @param uVector The exception vector.
3354 */
3355IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3356{
3357 Assert(uVector <= X86_XCPT_LAST);
3358 switch (uVector)
3359 {
3360 case X86_XCPT_DE:
3361 case X86_XCPT_TS:
3362 case X86_XCPT_NP:
3363 case X86_XCPT_SS:
3364 case X86_XCPT_GP:
3365 case X86_XCPT_SX: /* AMD only */
3366 return IEMXCPTCLASS_CONTRIBUTORY;
3367
3368 case X86_XCPT_PF:
3369 case X86_XCPT_VE: /* Intel only */
3370 return IEMXCPTCLASS_PAGE_FAULT;
3371
3372 case X86_XCPT_DF:
3373 return IEMXCPTCLASS_DOUBLE_FAULT;
3374 }
3375 return IEMXCPTCLASS_BENIGN;
3376}
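
/*
 * Informational note: vectors not listed above (e.g. #UD, #NM, #MF) are classified as
 * benign, which by itself never escalates a recursive exception to a double fault in
 * the evaluation below.
 */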
3377
3378
3379/**
3380 * Evaluates how to handle an exception caused during delivery of another event
3381 * (exception / interrupt).
3382 *
3383 * @returns How to handle the recursive exception.
3384 * @param pVCpu The cross context virtual CPU structure of the
3385 * calling thread.
3386 * @param fPrevFlags The flags of the previous event.
3387 * @param uPrevVector The vector of the previous event.
3388 * @param fCurFlags The flags of the current exception.
3389 * @param uCurVector The vector of the current exception.
3390 * @param pfXcptRaiseInfo Where to store additional information about the
3391 * exception condition. Optional.
3392 */
3393VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3394 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3395{
3396 /*
3397     * Only CPU exceptions can be raised while delivering other events; software interrupt
3398 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3399 */
3400 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3401 Assert(pVCpu); RT_NOREF(pVCpu);
3402 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3403
3404 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3405 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3406 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3407 {
3408 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3409 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3410 {
3411 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3412 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3413 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3414 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3415 {
3416 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3417 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3418 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3419 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3420 uCurVector, pVCpu->cpum.GstCtx.cr2));
3421 }
3422 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3423 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3424 {
3425 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3426 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3427 }
3428 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3429 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3430 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3431 {
3432 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3433 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3434 }
3435 }
3436 else
3437 {
3438 if (uPrevVector == X86_XCPT_NMI)
3439 {
3440 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3441 if (uCurVector == X86_XCPT_PF)
3442 {
3443 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3444 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3445 }
3446 }
3447 else if ( uPrevVector == X86_XCPT_AC
3448 && uCurVector == X86_XCPT_AC)
3449 {
3450 enmRaise = IEMXCPTRAISE_CPU_HANG;
3451 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3452 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3453 }
3454 }
3455 }
3456 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3457 {
3458 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3459 if (uCurVector == X86_XCPT_PF)
3460 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3461 }
3462 else
3463 {
3464 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3465 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3466 }
3467
3468 if (pfXcptRaiseInfo)
3469 *pfXcptRaiseInfo = fRaiseInfo;
3470 return enmRaise;
3471}
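
#if 0 /* Illustrative sketch only, not part of the build: how a hypothetical caller could
       * query the recursion handling.  A #NP raised while delivering a #GP pairs two
       * contributory exceptions, so the verdict is a double fault. */
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE const enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* previous event */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP, /* current exception */
                                                           &fRaiseInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
#endif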
3472
3473
3474/**
3475 * Enters the CPU shutdown state initiated by a triple fault or other
3476 * unrecoverable conditions.
3477 *
3478 * @returns Strict VBox status code.
3479 * @param pVCpu The cross context virtual CPU structure of the
3480 * calling thread.
3481 */
3482IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3483{
3484 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3485 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3486
3487 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3488 {
3489 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3490 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3491 }
3492
3493 RT_NOREF(pVCpu);
3494 return VINF_EM_TRIPLE_FAULT;
3495}
3496
3497
3498/**
3499 * Validates a new SS segment.
3500 *
3501 * @returns VBox strict status code.
3502 * @param pVCpu The cross context virtual CPU structure of the
3503 * calling thread.
3504 * @param   NewSS               The new SS selector.
3505 * @param uCpl The CPL to load the stack for.
3506 * @param pDesc Where to return the descriptor.
3507 */
3508IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3509{
3510 /* Null selectors are not allowed (we're not called for dispatching
3511 interrupts with SS=0 in long mode). */
3512 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3513 {
3514 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3515 return iemRaiseTaskSwitchFault0(pVCpu);
3516 }
3517
3518 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3519 if ((NewSS & X86_SEL_RPL) != uCpl)
3520 {
3521 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3522 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3523 }
3524
3525 /*
3526 * Read the descriptor.
3527 */
3528 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3529 if (rcStrict != VINF_SUCCESS)
3530 return rcStrict;
3531
3532 /*
3533 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3534 */
3535 if (!pDesc->Legacy.Gen.u1DescType)
3536 {
3537 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3538 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3539 }
3540
3541 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3542 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3543 {
3544 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3545 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3546 }
3547 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3548 {
3549 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3550 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3551 }
3552
3553 /* Is it there? */
3554 /** @todo testcase: Is this checked before the canonical / limit check below? */
3555 if (!pDesc->Legacy.Gen.u1Present)
3556 {
3557 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3558 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3559 }
3560
3561 return VINF_SUCCESS;
3562}
3563
3564
3565/**
3566 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3567 * not.
3568 *
3569 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3570 */
3571#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3572# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3573#else
3574# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3575#endif
3576
3577/**
3578 * Updates the EFLAGS in the correct manner wrt. PATM.
3579 *
3580 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3581 * @param a_fEfl The new EFLAGS.
3582 */
3583#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3584# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3585#else
3586# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3587#endif
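
#if 0 /* Illustrative sketch only, not part of the build: the read-modify-write pattern
       * intended for the two macros above; the real-mode exception code further down
       * uses the same pattern. */
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    fEfl &= ~(X86_EFL_IF | X86_EFL_TF);     /* e.g. mask interrupts and traps */
    IEMMISC_SET_EFL(pVCpu, fEfl);
#endif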
3588
3589
3590/** @} */
3591
3592/** @name Raising Exceptions.
3593 *
3594 * @{
3595 */
3596
3597
3598/**
3599 * Loads the specified stack far pointer from the TSS.
3600 *
3601 * @returns VBox strict status code.
3602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3603 * @param uCpl The CPL to load the stack for.
3604 * @param pSelSS Where to return the new stack segment.
3605 * @param puEsp Where to return the new stack pointer.
3606 */
3607IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3608{
3609 VBOXSTRICTRC rcStrict;
3610 Assert(uCpl < 4);
3611
3612 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3613 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3614 {
3615 /*
3616 * 16-bit TSS (X86TSS16).
3617 */
3618 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3619 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3620 {
3621 uint32_t off = uCpl * 4 + 2;
3622 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3623 {
3624 /** @todo check actual access pattern here. */
3625 uint32_t u32Tmp = 0; /* gcc maybe... */
3626 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3627 if (rcStrict == VINF_SUCCESS)
3628 {
3629 *puEsp = RT_LOWORD(u32Tmp);
3630 *pSelSS = RT_HIWORD(u32Tmp);
3631 return VINF_SUCCESS;
3632 }
3633 }
3634 else
3635 {
3636 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3637 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3638 }
3639 break;
3640 }
3641
3642 /*
3643 * 32-bit TSS (X86TSS32).
3644 */
3645 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3646 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3647 {
3648 uint32_t off = uCpl * 8 + 4;
3649 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3650 {
3651/** @todo check actual access pattern here. */
3652 uint64_t u64Tmp;
3653 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3654 if (rcStrict == VINF_SUCCESS)
3655 {
3656 *puEsp = u64Tmp & UINT32_MAX;
3657 *pSelSS = (RTSEL)(u64Tmp >> 32);
3658 return VINF_SUCCESS;
3659 }
3660 }
3661 else
3662 {
3663 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3664 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3665 }
3666 break;
3667 }
3668
3669 default:
3670 AssertFailed();
3671 rcStrict = VERR_IEM_IPE_4;
3672 break;
3673 }
3674
3675 *puEsp = 0; /* make gcc happy */
3676 *pSelSS = 0; /* make gcc happy */
3677 return rcStrict;
3678}
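
/*
 * Worked example for the offset math above (informational only): in a 32-bit TSS the
 * ESP/SS pairs start at offset 4 with a stride of 8 bytes, so uCpl=1 gives
 * off = 1 * 8 + 4 = 0x0c, i.e. esp1 with ss1 in the high dword of the fetched qword.
 * In a 16-bit TSS the SP/SS pairs start at offset 2 with a stride of 4 bytes, so
 * uCpl=1 gives off = 1 * 4 + 2 = 6, i.e. sp1 with ss1 in the high word of the dword.
 */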
3679
3680
3681/**
3682 * Loads the specified stack pointer from the 64-bit TSS.
3683 *
3684 * @returns VBox strict status code.
3685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3686 * @param uCpl The CPL to load the stack for.
3687 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3688 * @param puRsp Where to return the new stack pointer.
3689 */
3690IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3691{
3692 Assert(uCpl < 4);
3693 Assert(uIst < 8);
3694 *puRsp = 0; /* make gcc happy */
3695
3696 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3697 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3698
3699 uint32_t off;
3700 if (uIst)
3701 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3702 else
3703 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3704 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3705 {
3706 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3707 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3708 }
3709
3710 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3711}
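
/*
 * Worked example for the offset math above (informational only): uIst=3 resolves to
 * RT_UOFFSETOF(X86TSS64, ist1) + 2 * sizeof(uint64_t), i.e. the ist3 field, while
 * uIst=0 with uCpl=2 selects the rsp2 field.
 */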
3712
3713
3714/**
3715 * Adjust the CPU state according to the exception being raised.
3716 *
3717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3718 * @param u8Vector The exception that has been raised.
3719 */
3720DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3721{
3722 switch (u8Vector)
3723 {
3724 case X86_XCPT_DB:
3725 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3726 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3727 break;
3728 /** @todo Read the AMD and Intel exception reference... */
3729 }
3730}
3731
3732
3733/**
3734 * Implements exceptions and interrupts for real mode.
3735 *
3736 * @returns VBox strict status code.
3737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3738 * @param cbInstr The number of bytes to offset rIP by in the return
3739 * address.
3740 * @param u8Vector The interrupt / exception vector number.
3741 * @param fFlags The flags.
3742 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3743 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3744 */
3745IEM_STATIC VBOXSTRICTRC
3746iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3747 uint8_t cbInstr,
3748 uint8_t u8Vector,
3749 uint32_t fFlags,
3750 uint16_t uErr,
3751 uint64_t uCr2)
3752{
3753 NOREF(uErr); NOREF(uCr2);
3754 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3755
3756 /*
3757 * Read the IDT entry.
3758 */
3759 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3760 {
3761 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3762 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3763 }
3764 RTFAR16 Idte;
3765 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3766 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3767 {
3768 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3769 return rcStrict;
3770 }
3771
3772 /*
3773 * Push the stack frame.
3774 */
3775 uint16_t *pu16Frame;
3776 uint64_t uNewRsp;
3777 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3778 if (rcStrict != VINF_SUCCESS)
3779 return rcStrict;
3780
3781 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3782#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3783 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3784 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3785 fEfl |= UINT16_C(0xf000);
3786#endif
3787 pu16Frame[2] = (uint16_t)fEfl;
3788 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3789 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3790 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3791 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3792 return rcStrict;
3793
3794 /*
3795 * Load the vector address into cs:ip and make exception specific state
3796 * adjustments.
3797 */
3798 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3799 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3800 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3801 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3802 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3803 pVCpu->cpum.GstCtx.rip = Idte.off;
3804 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3805 IEMMISC_SET_EFL(pVCpu, fEfl);
3806
3807 /** @todo do we actually do this in real mode? */
3808 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3809 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3810
3811 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3812}
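
/*
 * Informational sketch of the real-mode layout used above: the IVT at IDTR.base holds
 * one IP:CS far pointer per vector, 4 bytes each, so vector N is fetched from
 * IDTR.base + 4 * N (e.g. vector 0x21 from offset 0x84).  The 6-byte stack frame is
 * written lowest address first as IP, CS, FLAGS, matching pu16Frame[0..2] above.
 */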
3813
3814
3815/**
3816 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3817 *
3818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3819 * @param pSReg Pointer to the segment register.
3820 */
3821IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3822{
3823 pSReg->Sel = 0;
3824 pSReg->ValidSel = 0;
3825 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3826 {
3827         /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3828 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3829 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3830 }
3831 else
3832 {
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 /** @todo check this on AMD-V */
3835 pSReg->u64Base = 0;
3836 pSReg->u32Limit = 0;
3837 }
3838}
3839
3840
3841/**
3842 * Loads a segment selector during a task switch in V8086 mode.
3843 *
3844 * @param pSReg Pointer to the segment register.
3845 * @param uSel The selector value to load.
3846 */
3847IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3848{
3849 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3850 pSReg->Sel = uSel;
3851 pSReg->ValidSel = uSel;
3852 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3853 pSReg->u64Base = uSel << 4;
3854 pSReg->u32Limit = 0xffff;
3855 pSReg->Attr.u = 0xf3;
3856}
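
/*
 * Worked example (informational only): loading uSel=0x1234 in V8086 mode yields
 * u64Base = 0x1234 << 4 = 0x12340, u32Limit = 0xffff and Attr = 0xf3, i.e. a present,
 * DPL=3, accessed read/write data segment.
 */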
3857
3858
3859/**
3860 * Loads a NULL data selector into a selector register, both the hidden and
3861 * visible parts, in protected mode.
3862 *
3863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3864 * @param pSReg Pointer to the segment register.
3865 * @param uRpl The RPL.
3866 */
3867IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3868{
3869     /** @todo Testcase: write a testcase checking what happens when loading a NULL
3870 * data selector in protected mode. */
3871 pSReg->Sel = uRpl;
3872 pSReg->ValidSel = uRpl;
3873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3874 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3875 {
3876         /* VT-x (Intel 3960x) was observed doing something like this. */
3877 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3878 pSReg->u32Limit = UINT32_MAX;
3879 pSReg->u64Base = 0;
3880 }
3881 else
3882 {
3883 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3884 pSReg->u32Limit = 0;
3885 pSReg->u64Base = 0;
3886 }
3887}
3888
3889
3890/**
3891 * Loads a segment selector during a task switch in protected mode.
3892 *
3893 * In this task switch scenario, we would throw \#TS exceptions rather than
3894 * \#GPs.
3895 *
3896 * @returns VBox strict status code.
3897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3898 * @param pSReg Pointer to the segment register.
3899 * @param uSel The new selector value.
3900 *
3901 * @remarks This does _not_ handle CS or SS.
3902 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3903 */
3904IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3905{
3906 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3907
3908 /* Null data selector. */
3909 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3910 {
3911 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3912 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3913 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3914 return VINF_SUCCESS;
3915 }
3916
3917 /* Fetch the descriptor. */
3918 IEMSELDESC Desc;
3919 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3920 if (rcStrict != VINF_SUCCESS)
3921 {
3922 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3923 VBOXSTRICTRC_VAL(rcStrict)));
3924 return rcStrict;
3925 }
3926
3927 /* Must be a data segment or readable code segment. */
3928 if ( !Desc.Legacy.Gen.u1DescType
3929 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3930 {
3931 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3932 Desc.Legacy.Gen.u4Type));
3933 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3934 }
3935
3936 /* Check privileges for data segments and non-conforming code segments. */
3937 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3938 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3939 {
3940 /* The RPL and the new CPL must be less than or equal to the DPL. */
3941 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3942 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3943 {
3944 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3945 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3946 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3947 }
3948 }
3949
3950 /* Is it there? */
3951 if (!Desc.Legacy.Gen.u1Present)
3952 {
3953 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3954 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3955 }
3956
3957 /* The base and limit. */
3958 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3959 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3960
3961 /*
3962 * Ok, everything checked out fine. Now set the accessed bit before
3963 * committing the result into the registers.
3964 */
3965 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3966 {
3967 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3968 if (rcStrict != VINF_SUCCESS)
3969 return rcStrict;
3970 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3971 }
3972
3973 /* Commit */
3974 pSReg->Sel = uSel;
3975 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3976 pSReg->u32Limit = cbLimit;
3977 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3978 pSReg->ValidSel = uSel;
3979 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3980 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3981 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3982
3983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3984 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3985 return VINF_SUCCESS;
3986}
3987
3988
3989/**
3990 * Performs a task switch.
3991 *
3992 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3993 * caller is responsible for performing the necessary checks (like DPL, TSS
3994 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3995 * reference for JMP, CALL, IRET.
3996 *
3997 * If the task switch is due to a software interrupt or hardware exception,
3998 * the caller is responsible for validating the TSS selector and descriptor. See
3999 * Intel Instruction reference for INT n.
4000 *
4001 * @returns VBox strict status code.
4002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4003 * @param enmTaskSwitch The cause of the task switch.
4004 * @param uNextEip The EIP effective after the task switch.
4005 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4006 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4007 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4008 * @param SelTSS The TSS selector of the new task.
4009 * @param pNewDescTSS Pointer to the new TSS descriptor.
4010 */
4011IEM_STATIC VBOXSTRICTRC
4012iemTaskSwitch(PVMCPU pVCpu,
4013 IEMTASKSWITCH enmTaskSwitch,
4014 uint32_t uNextEip,
4015 uint32_t fFlags,
4016 uint16_t uErr,
4017 uint64_t uCr2,
4018 RTSEL SelTSS,
4019 PIEMSELDESC pNewDescTSS)
4020{
4021 Assert(!IEM_IS_REAL_MODE(pVCpu));
4022 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4023 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4024
4025 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4026 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4027 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4028 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4029 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4030
4031 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4032 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4033
4034 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4035 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4036
4037 /* Update CR2 in case it's a page-fault. */
4038 /** @todo This should probably be done much earlier in IEM/PGM. See
4039 * @bugref{5653#c49}. */
4040 if (fFlags & IEM_XCPT_FLAGS_CR2)
4041 pVCpu->cpum.GstCtx.cr2 = uCr2;
4042
4043 /*
4044 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4045 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4046 */
4047 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4048 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4049 if (uNewTSSLimit < uNewTSSLimitMin)
4050 {
4051 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4052 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4053 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4054 }
4055
4056 /*
4057     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4058 * The new TSS must have been read and validated (DPL, limits etc.) before a
4059 * task-switch VM-exit commences.
4060 *
4061     * See Intel spec. 25.4.2 "Treatment of Task Switches".
4062 */
4063 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4064 {
4065 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4066 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4067 }
4068
4069 /*
4070 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4071 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4072 */
4073 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4074 {
4075 uint32_t const uExitInfo1 = SelTSS;
4076 uint32_t uExitInfo2 = uErr;
4077 switch (enmTaskSwitch)
4078 {
4079 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4080 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4081 default: break;
4082 }
4083 if (fFlags & IEM_XCPT_FLAGS_ERR)
4084 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4085 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4086 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4087
4088 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4089 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4090 RT_NOREF2(uExitInfo1, uExitInfo2);
4091 }
4092
4093 /*
4094     * Check the current TSS limit. The last bytes written to the current TSS during the
4095     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4096 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4097 *
4098     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4099 * end up with smaller than "legal" TSS limits.
4100 */
4101 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4102 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4103 if (uCurTSSLimit < uCurTSSLimitMin)
4104 {
4105 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4106 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4107 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4108 }
4109
4110 /*
4111 * Verify that the new TSS can be accessed and map it. Map only the required contents
4112 * and not the entire TSS.
4113 */
4114 void *pvNewTSS;
4115 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4116 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4117 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4118 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4119 * not perform correct translation if this happens. See Intel spec. 7.2.1
4120 * "Task-State Segment" */
4121 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4122 if (rcStrict != VINF_SUCCESS)
4123 {
4124 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4125 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4126 return rcStrict;
4127 }
4128
4129 /*
4130 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4131 */
4132 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4133 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4134 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4135 {
4136 PX86DESC pDescCurTSS;
4137 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4138 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4139 if (rcStrict != VINF_SUCCESS)
4140 {
4141 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4142 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4143 return rcStrict;
4144 }
4145
4146 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4147 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4148 if (rcStrict != VINF_SUCCESS)
4149 {
4150 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4151 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4152 return rcStrict;
4153 }
4154
4155 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4156 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4157 {
4158 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4159 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4160 u32EFlags &= ~X86_EFL_NT;
4161 }
4162 }
4163
4164 /*
4165 * Save the CPU state into the current TSS.
4166 */
4167 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4168 if (GCPtrNewTSS == GCPtrCurTSS)
4169 {
4170 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4171 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4172 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4173 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4174 pVCpu->cpum.GstCtx.ldtr.Sel));
4175 }
4176 if (fIsNewTSS386)
4177 {
4178 /*
4179 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4180 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4181 */
4182 void *pvCurTSS32;
4183 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4184 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4185 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4186 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4187 if (rcStrict != VINF_SUCCESS)
4188 {
4189 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4190 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4191 return rcStrict;
4192 }
4193
4194         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4195 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4196 pCurTSS32->eip = uNextEip;
4197 pCurTSS32->eflags = u32EFlags;
4198 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4199 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4200 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4201 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4202 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4203 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4204 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4205 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4206 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4207 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4208 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4209 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4210 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4211 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4212
4213 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4214 if (rcStrict != VINF_SUCCESS)
4215 {
4216 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4217 VBOXSTRICTRC_VAL(rcStrict)));
4218 return rcStrict;
4219 }
4220 }
4221 else
4222 {
4223 /*
4224 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4225 */
4226 void *pvCurTSS16;
4227 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4228 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4229 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4230 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4231 if (rcStrict != VINF_SUCCESS)
4232 {
4233 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4234 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4235 return rcStrict;
4236 }
4237
4238         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4239 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4240 pCurTSS16->ip = uNextEip;
4241 pCurTSS16->flags = u32EFlags;
4242 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4243 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4244 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4245 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4246 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4247 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4248 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4249 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4250 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4251 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4252 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4253 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4254
4255 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4256 if (rcStrict != VINF_SUCCESS)
4257 {
4258 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4259 VBOXSTRICTRC_VAL(rcStrict)));
4260 return rcStrict;
4261 }
4262 }
4263
4264 /*
4265 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4266 */
4267 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4268 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4269 {
4270 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4271 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4272 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4273 }
4274
4275 /*
4276 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4277 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4278 */
4279 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4280 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4281 bool fNewDebugTrap;
4282 if (fIsNewTSS386)
4283 {
4284 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4285 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4286 uNewEip = pNewTSS32->eip;
4287 uNewEflags = pNewTSS32->eflags;
4288 uNewEax = pNewTSS32->eax;
4289 uNewEcx = pNewTSS32->ecx;
4290 uNewEdx = pNewTSS32->edx;
4291 uNewEbx = pNewTSS32->ebx;
4292 uNewEsp = pNewTSS32->esp;
4293 uNewEbp = pNewTSS32->ebp;
4294 uNewEsi = pNewTSS32->esi;
4295 uNewEdi = pNewTSS32->edi;
4296 uNewES = pNewTSS32->es;
4297 uNewCS = pNewTSS32->cs;
4298 uNewSS = pNewTSS32->ss;
4299 uNewDS = pNewTSS32->ds;
4300 uNewFS = pNewTSS32->fs;
4301 uNewGS = pNewTSS32->gs;
4302 uNewLdt = pNewTSS32->selLdt;
4303 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4304 }
4305 else
4306 {
4307 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4308 uNewCr3 = 0;
4309 uNewEip = pNewTSS16->ip;
4310 uNewEflags = pNewTSS16->flags;
4311 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4312 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4313 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4314 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4315 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4316 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4317 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4318 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4319 uNewES = pNewTSS16->es;
4320 uNewCS = pNewTSS16->cs;
4321 uNewSS = pNewTSS16->ss;
4322 uNewDS = pNewTSS16->ds;
4323 uNewFS = 0;
4324 uNewGS = 0;
4325 uNewLdt = pNewTSS16->selLdt;
4326 fNewDebugTrap = false;
4327 }
4328
4329 if (GCPtrNewTSS == GCPtrCurTSS)
4330 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4331 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4332
4333 /*
4334 * We're done accessing the new TSS.
4335 */
4336 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4337 if (rcStrict != VINF_SUCCESS)
4338 {
4339 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4340 return rcStrict;
4341 }
4342
4343 /*
4344 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4345 */
4346 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4347 {
4348 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4349 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4350 if (rcStrict != VINF_SUCCESS)
4351 {
4352 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4353 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4354 return rcStrict;
4355 }
4356
4357 /* Check that the descriptor indicates the new TSS is available (not busy). */
4358 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4359 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4360 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4361
4362 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4363 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4364 if (rcStrict != VINF_SUCCESS)
4365 {
4366 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4367 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4368 return rcStrict;
4369 }
4370 }
4371
4372 /*
4373     * From this point on, we're technically in the new task. Any exceptions raised now are
4374     * deferred until the task switch has completed, i.e. they hit the new task before any of its instructions execute.
4375 */
4376 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4377 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4378 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4379 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4380 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4381 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4382 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4383
4384 /* Set the busy bit in TR. */
4385 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4386 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4387 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4388 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4389 {
4390 uNewEflags |= X86_EFL_NT;
4391 }
4392
4393 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4394 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4395 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4396
4397 pVCpu->cpum.GstCtx.eip = uNewEip;
4398 pVCpu->cpum.GstCtx.eax = uNewEax;
4399 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4400 pVCpu->cpum.GstCtx.edx = uNewEdx;
4401 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4402 pVCpu->cpum.GstCtx.esp = uNewEsp;
4403 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4404 pVCpu->cpum.GstCtx.esi = uNewEsi;
4405 pVCpu->cpum.GstCtx.edi = uNewEdi;
4406
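    /* Keep only the architecturally defined EFLAGS bits from the new TSS image and
       force reserved bit 1, which always reads as one. */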
4407 uNewEflags &= X86_EFL_LIVE_MASK;
4408 uNewEflags |= X86_EFL_RA1_MASK;
4409 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4410
4411 /*
4412 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4413 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4414 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4415 */
4416 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4417 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4418
4419 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4420 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4421
4422 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4423 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4424
4425 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4426 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4427
4428 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4429 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4430
4431 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4432 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4433 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4434
4435 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4436 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4437 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4438 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4439
4440 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4441 {
4442 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4443 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4444 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4445 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4446 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4447 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4448 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4449 }
4450
4451 /*
4452 * Switch CR3 for the new task.
4453 */
4454 if ( fIsNewTSS386
4455 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4456 {
4457 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4458 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4459 AssertRCSuccessReturn(rc, rc);
4460
4461 /* Inform PGM. */
4462 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4463 AssertRCReturn(rc, rc);
4464 /* ignore informational status codes */
4465
4466 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4467 }
4468
4469 /*
4470 * Switch LDTR for the new task.
4471 */
4472 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4473 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4474 else
4475 {
4476 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4477
4478 IEMSELDESC DescNewLdt;
4479 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4480 if (rcStrict != VINF_SUCCESS)
4481 {
4482 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4483 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4484 return rcStrict;
4485 }
4486 if ( !DescNewLdt.Legacy.Gen.u1Present
4487 || DescNewLdt.Legacy.Gen.u1DescType
4488 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4489 {
4490 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4491 uNewLdt, DescNewLdt.Legacy.u));
4492 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4493 }
4494
4495 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4496 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4497 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4498 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4499 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4500 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4501 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4502 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4503 }
4504
4505 IEMSELDESC DescSS;
4506 if (IEM_IS_V86_MODE(pVCpu))
4507 {
4508 pVCpu->iem.s.uCpl = 3;
4509 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4510 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4511 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4512 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4513 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4514 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4515
4516 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4517 DescSS.Legacy.u = 0;
4518 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4519 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4520 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4521 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4522 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4523 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4524 DescSS.Legacy.Gen.u2Dpl = 3;
4525 }
4526 else
4527 {
4528 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4529
4530 /*
4531 * Load the stack segment for the new task.
4532 */
4533 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4534 {
4535 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* Fetch the descriptor. */
4540 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4541 if (rcStrict != VINF_SUCCESS)
4542 {
4543 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4544 VBOXSTRICTRC_VAL(rcStrict)));
4545 return rcStrict;
4546 }
4547
4548 /* SS must be a data segment and writable. */
4549 if ( !DescSS.Legacy.Gen.u1DescType
4550 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4551 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4552 {
4553 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4554 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4555 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4556 }
4557
4558 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4559 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4560 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4561 {
4562 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4563 uNewCpl));
4564 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4565 }
4566
4567 /* Is it there? */
4568 if (!DescSS.Legacy.Gen.u1Present)
4569 {
4570 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4571 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4575 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4576
4577 /* Set the accessed bit before committing the result into SS. */
4578 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4579 {
4580 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4581 if (rcStrict != VINF_SUCCESS)
4582 return rcStrict;
4583 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4584 }
4585
4586 /* Commit SS. */
4587 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4588 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4589 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4590 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4591 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4592 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4593 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4594
4595 /* CPL has changed, update IEM before loading rest of segments. */
4596 pVCpu->iem.s.uCpl = uNewCpl;
4597
4598 /*
4599 * Load the data segments for the new task.
4600 */
4601 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4605 if (rcStrict != VINF_SUCCESS)
4606 return rcStrict;
4607 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4608 if (rcStrict != VINF_SUCCESS)
4609 return rcStrict;
4610 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4611 if (rcStrict != VINF_SUCCESS)
4612 return rcStrict;
4613
4614 /*
4615 * Load the code segment for the new task.
4616 */
4617 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4618 {
4619 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4620 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4621 }
4622
4623 /* Fetch the descriptor. */
4624 IEMSELDESC DescCS;
4625 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4626 if (rcStrict != VINF_SUCCESS)
4627 {
4628 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4629 return rcStrict;
4630 }
4631
4632 /* CS must be a code segment. */
4633 if ( !DescCS.Legacy.Gen.u1DescType
4634 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4635 {
4636 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4637 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4638 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4639 }
4640
4641 /* For conforming CS, DPL must be less than or equal to the RPL. */
4642 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4643 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4644 {
4645 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4646 DescCS.Legacy.Gen.u2Dpl));
4647 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4648 }
4649
4650 /* For non-conforming CS, DPL must match RPL. */
4651 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4652 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4653 {
4654 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4655 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4656 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4657 }
4658
4659 /* Is it there? */
4660 if (!DescCS.Legacy.Gen.u1Present)
4661 {
4662 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4663 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4664 }
4665
4666 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4667 u64Base = X86DESC_BASE(&DescCS.Legacy);
4668
4669 /* Set the accessed bit before committing the result into CS. */
4670 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4671 {
4672 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4673 if (rcStrict != VINF_SUCCESS)
4674 return rcStrict;
4675 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4676 }
4677
4678 /* Commit CS. */
4679 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4680 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4681 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4682 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4683 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4684 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4685 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4686 }
4687
4688 /** @todo Debug trap. */
4689 if (fIsNewTSS386 && fNewDebugTrap)
4690 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4691
4692 /*
4693 * Construct the error code masks based on what caused this task switch.
4694 * See Intel Instruction reference for INT.
4695 */
4696 uint16_t uExt;
4697 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4698 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4699 {
4700 uExt = 1;
4701 }
4702 else
4703 uExt = 0;
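    /* Example: if this task switch is dispatching a hardware interrupt or exception
       (IEMTASKSWITCH_INT_XCPT without the soft-int flag), any #SS/#GP raised below
       carries error code 1 (EXT set); all other causes push an error code of 0. */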
4704
4705 /*
4706 * Push any error code on to the new stack.
4707 */
4708 if (fFlags & IEM_XCPT_FLAGS_ERR)
4709 {
4710 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4711 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4712 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4713
4714 /* Check that there is sufficient space on the stack. */
4715 /** @todo Factor out segment limit checking for normal/expand down segments
4716 * into a separate function. */
4717 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4718 {
4719 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4720 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4721 {
4722 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4723 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4724 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4725 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4726 }
4727 }
4728 else
4729 {
4730 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4731 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4732 {
4733 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4734 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4735 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4736 }
4737 }
4738
4739
4740 if (fIsNewTSS386)
4741 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4742 else
4743 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4744 if (rcStrict != VINF_SUCCESS)
4745 {
4746 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4747 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4748 return rcStrict;
4749 }
4750 }
4751
4752 /* Check the new EIP against the new CS limit. */
4753 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4754 {
4755 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4756 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4757 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4758 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4759 }
4760
4761 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4762 pVCpu->cpum.GstCtx.ss.Sel));
4763 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4764}
4765
4766
4767/**
4768 * Implements exceptions and interrupts for protected mode.
4769 *
4770 * @returns VBox strict status code.
4771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4772 * @param cbInstr The number of bytes to offset rIP by in the return
4773 * address.
4774 * @param u8Vector The interrupt / exception vector number.
4775 * @param fFlags The flags.
4776 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4777 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4778 */
4779IEM_STATIC VBOXSTRICTRC
4780iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4781 uint8_t cbInstr,
4782 uint8_t u8Vector,
4783 uint32_t fFlags,
4784 uint16_t uErr,
4785 uint64_t uCr2)
4786{
4787 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4788
4789 /*
4790 * Read the IDT entry.
4791 */
4792 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4793 {
4794 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4795 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4796 }
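    /* For example, vector 0x21 needs the 8-byte descriptor at IDT offsets 0x108..0x10f,
       so the check above requires IDTR.LIMIT to be at least 0x10f. */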
4797 X86DESC Idte;
4798 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4799 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4800 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4801 {
4802 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4803 return rcStrict;
4804 }
4805 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4806 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4807 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4808
4809 /*
4810 * Check the descriptor type, DPL and such.
4811 * ASSUMES this is done in the same order as described for call-gate calls.
4812 */
4813 if (Idte.Gate.u1DescType)
4814 {
4815 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4816 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4817 }
4818 bool fTaskGate = false;
4819 uint8_t f32BitGate = true;
4820 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4821 switch (Idte.Gate.u4Type)
4822 {
4823 case X86_SEL_TYPE_SYS_UNDEFINED:
4824 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4825 case X86_SEL_TYPE_SYS_LDT:
4826 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4827 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4828 case X86_SEL_TYPE_SYS_UNDEFINED2:
4829 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4830 case X86_SEL_TYPE_SYS_UNDEFINED3:
4831 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4832 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4833 case X86_SEL_TYPE_SYS_UNDEFINED4:
4834 {
4835 /** @todo check what actually happens when the type is wrong...
4836 * esp. call gates. */
4837 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4838 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4839 }
4840
4841 case X86_SEL_TYPE_SYS_286_INT_GATE:
4842 f32BitGate = false;
4843 RT_FALL_THRU();
4844 case X86_SEL_TYPE_SYS_386_INT_GATE:
4845 fEflToClear |= X86_EFL_IF;
4846 break;
4847
4848 case X86_SEL_TYPE_SYS_TASK_GATE:
4849 fTaskGate = true;
4850#ifndef IEM_IMPLEMENTS_TASKSWITCH
4851 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4852#endif
4853 break;
4854
4855 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4856 f32BitGate = false;
     RT_FALL_THRU();
4857 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4858 break;
4859
4860 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4861 }
4862
4863 /* Check DPL against CPL if applicable. */
4864 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4865 {
4866 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4869 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4870 }
4871 }
4872
4873 /* Is it there? */
4874 if (!Idte.Gate.u1Present)
4875 {
4876 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4877 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4878 }
4879
4880 /* Is it a task-gate? */
4881 if (fTaskGate)
4882 {
4883 /*
4884 * Construct the error code masks based on what caused this task switch.
4885 * See Intel Instruction reference for INT.
4886 */
4887 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4888 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4889 RTSEL SelTSS = Idte.Gate.u16Sel;
4890
4891 /*
4892 * Fetch the TSS descriptor in the GDT.
4893 */
4894 IEMSELDESC DescTSS;
4895 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4896 if (rcStrict != VINF_SUCCESS)
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4899 VBOXSTRICTRC_VAL(rcStrict)));
4900 return rcStrict;
4901 }
4902
4903 /* The TSS descriptor must be a system segment and be available (not busy). */
4904 if ( DescTSS.Legacy.Gen.u1DescType
4905 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4906 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4907 {
4908 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4909 u8Vector, SelTSS, DescTSS.Legacy.au64));
4910 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4911 }
4912
4913 /* The TSS must be present. */
4914 if (!DescTSS.Legacy.Gen.u1Present)
4915 {
4916 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4917 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4918 }
4919
4920 /* Do the actual task switch. */
4921 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4922 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4923 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4924 }
4925
4926 /* A null CS is bad. */
4927 RTSEL NewCS = Idte.Gate.u16Sel;
4928 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4929 {
4930 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4931 return iemRaiseGeneralProtectionFault0(pVCpu);
4932 }
4933
4934 /* Fetch the descriptor for the new CS. */
4935 IEMSELDESC DescCS;
4936 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4937 if (rcStrict != VINF_SUCCESS)
4938 {
4939 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4940 return rcStrict;
4941 }
4942
4943 /* Must be a code segment. */
4944 if (!DescCS.Legacy.Gen.u1DescType)
4945 {
4946 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4947 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4948 }
4949 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4950 {
4951 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4952 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4953 }
4954
4955 /* Don't allow lowering the privilege level. */
4956 /** @todo Does the lowering of privileges apply to software interrupts
4957 * only? This has bearings on the more-privileged or
4958 * same-privilege stack behavior further down. A testcase would
4959 * be nice. */
4960 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4961 {
4962 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4963 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4964 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4965 }
4966
4967 /* Make sure the selector is present. */
4968 if (!DescCS.Legacy.Gen.u1Present)
4969 {
4970 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4971 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4972 }
4973
4974 /* Check the new EIP against the new CS limit. */
4975 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4976 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4977 ? Idte.Gate.u16OffsetLow
4978 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4979 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4980 if (uNewEip > cbLimitCS)
4981 {
4982 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4983 u8Vector, uNewEip, cbLimitCS, NewCS));
4984 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4985 }
4986 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4987
4988 /* Calc the flag image to push. */
4989 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4990 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4991 fEfl &= ~X86_EFL_RF;
4992 else
4993 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4994
4995 /* From V8086 mode only go to CPL 0. */
4996 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4997 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4998 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4999 {
5000 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5001 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5002 }
5003
5004 /*
5005 * If the privilege level changes, we need to get a new stack from the TSS.
5006 * This in turns means validating the new SS and ESP...
5007 */
5008 if (uNewCpl != pVCpu->iem.s.uCpl)
5009 {
5010 RTSEL NewSS;
5011 uint32_t uNewEsp;
5012 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5013 if (rcStrict != VINF_SUCCESS)
5014 return rcStrict;
5015
5016 IEMSELDESC DescSS;
5017 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5021 if (!DescSS.Legacy.Gen.u1DefBig)
5022 {
5023 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5024 uNewEsp = (uint16_t)uNewEsp;
5025 }
5026
5027 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5028
5029 /* Check that there is sufficient space for the stack frame. */
5030 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5031 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5032 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5033 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
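        /* E.g. a 32-bit gate with an error code and no V8086 flag gives 12 << 1 = 24 bytes
           (uErr, EIP, CS, EFLAGS, ESP, SS as dwords); the V8086 case adds ES/DS/FS/GS for
           20 << 1 = 40 bytes.  A 16-bit gate halves these since f32BitGate is 0. */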
5034
5035 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5036 {
5037 if ( uNewEsp - 1 > cbLimitSS
5038 || uNewEsp < cbStackFrame)
5039 {
5040 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5041 u8Vector, NewSS, uNewEsp, cbStackFrame));
5042 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5043 }
5044 }
5045 else
5046 {
5047 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5048 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5049 {
5050 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5051 u8Vector, NewSS, uNewEsp, cbStackFrame));
5052 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5053 }
5054 }
5055
5056 /*
5057 * Start making changes.
5058 */
5059
5060 /* Set the new CPL so that stack accesses use it. */
5061 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5062 pVCpu->iem.s.uCpl = uNewCpl;
5063
5064 /* Create the stack frame. */
5065 RTPTRUNION uStackFrame;
5066 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5067 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5068 if (rcStrict != VINF_SUCCESS)
5069 return rcStrict;
5070 void * const pvStackFrame = uStackFrame.pv;
5071 if (f32BitGate)
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu32++ = uErr;
5075 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5076 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5077 uStackFrame.pu32[2] = fEfl;
5078 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5079 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5080 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5081 if (fEfl & X86_EFL_VM)
5082 {
5083 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5084 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5085 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5086 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5087 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5088 }
5089 }
5090 else
5091 {
5092 if (fFlags & IEM_XCPT_FLAGS_ERR)
5093 *uStackFrame.pu16++ = uErr;
5094 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5095 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5096 uStackFrame.pu16[2] = fEfl;
5097 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5098 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5099 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5100 if (fEfl & X86_EFL_VM)
5101 {
5102 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5103 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5104 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5105 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5106 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5107 }
5108 }
5109 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5110 if (rcStrict != VINF_SUCCESS)
5111 return rcStrict;
5112
5113 /* Mark the selectors 'accessed' (hope this is the correct time). */
5114 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5115 * after pushing the stack frame? (Write protect the gdt + stack to
5116 * find out.) */
5117 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5118 {
5119 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5120 if (rcStrict != VINF_SUCCESS)
5121 return rcStrict;
5122 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5123 }
5124
5125 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5126 {
5127 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5128 if (rcStrict != VINF_SUCCESS)
5129 return rcStrict;
5130 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5131 }
5132
5133 /*
5134 * Start committing the register changes (joins with the DPL=CPL branch).
5135 */
5136 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5137 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5138 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5139 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5140 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5141 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5142 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5143 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5144 * SP is loaded).
5145 * Need to check the other combinations too:
5146 * - 16-bit TSS, 32-bit handler
5147 * - 32-bit TSS, 16-bit handler */
5148 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5149 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5150 else
5151 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5152
5153 if (fEfl & X86_EFL_VM)
5154 {
5155 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5156 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5157 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5158 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5159 }
5160 }
5161 /*
5162 * Same privilege, no stack change and smaller stack frame.
5163 */
5164 else
5165 {
5166 uint64_t uNewRsp;
5167 RTPTRUNION uStackFrame;
5168 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
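        /* E.g. a 32-bit gate pushes EIP, CS and EFLAGS (12 bytes), or 16 bytes when an
           error code is included; a 16-bit gate pushes the same items as words. */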
5169 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5170 if (rcStrict != VINF_SUCCESS)
5171 return rcStrict;
5172 void * const pvStackFrame = uStackFrame.pv;
5173
5174 if (f32BitGate)
5175 {
5176 if (fFlags & IEM_XCPT_FLAGS_ERR)
5177 *uStackFrame.pu32++ = uErr;
5178 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5179 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5180 uStackFrame.pu32[2] = fEfl;
5181 }
5182 else
5183 {
5184 if (fFlags & IEM_XCPT_FLAGS_ERR)
5185 *uStackFrame.pu16++ = uErr;
5186 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5187 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5188 uStackFrame.pu16[2] = fEfl;
5189 }
5190 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5191 if (rcStrict != VINF_SUCCESS)
5192 return rcStrict;
5193
5194 /* Mark the CS selector as 'accessed'. */
5195 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5196 {
5197 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5198 if (rcStrict != VINF_SUCCESS)
5199 return rcStrict;
5200 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5201 }
5202
5203 /*
5204 * Start committing the register changes (joins with the other branch).
5205 */
5206 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5207 }
5208
5209 /* ... register committing continues. */
5210 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5211 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5212 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5213 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5214 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5215 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5216
5217 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5218 fEfl &= ~fEflToClear;
5219 IEMMISC_SET_EFL(pVCpu, fEfl);
5220
5221 if (fFlags & IEM_XCPT_FLAGS_CR2)
5222 pVCpu->cpum.GstCtx.cr2 = uCr2;
5223
5224 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5225 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5226
5227 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5228}
5229
5230
5231/**
5232 * Implements exceptions and interrupts for long mode.
5233 *
5234 * @returns VBox strict status code.
5235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5236 * @param cbInstr The number of bytes to offset rIP by in the return
5237 * address.
5238 * @param u8Vector The interrupt / exception vector number.
5239 * @param fFlags The flags.
5240 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5241 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5242 */
5243IEM_STATIC VBOXSTRICTRC
5244iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5245 uint8_t cbInstr,
5246 uint8_t u8Vector,
5247 uint32_t fFlags,
5248 uint16_t uErr,
5249 uint64_t uCr2)
5250{
5251 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5252
5253 /*
5254 * Read the IDT entry.
5255 */
5256 uint16_t offIdt = (uint16_t)u8Vector << 4;
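    /* Long-mode IDT entries are 16 bytes, so e.g. vector 0x0e (#PF) lives at offset 0xe0
       and is fetched below as two 8-byte halves. */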
5257 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5258 {
5259 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5260 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5261 }
5262 X86DESC64 Idte;
5263 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5264 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5265 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5266 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5267 {
5268 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5269 return rcStrict;
5270 }
5271 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5272 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5273 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5274
5275 /*
5276 * Check the descriptor type, DPL and such.
5277 * ASSUMES this is done in the same order as described for call-gate calls.
5278 */
5279 if (Idte.Gate.u1DescType)
5280 {
5281 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5282 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5283 }
5284 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5285 switch (Idte.Gate.u4Type)
5286 {
5287 case AMD64_SEL_TYPE_SYS_INT_GATE:
5288 fEflToClear |= X86_EFL_IF;
5289 break;
5290 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5291 break;
5292
5293 default:
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5295 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5296 }
5297
5298 /* Check DPL against CPL if applicable. */
5299 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5300 {
5301 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5302 {
5303 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5304 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5305 }
5306 }
5307
5308 /* Is it there? */
5309 if (!Idte.Gate.u1Present)
5310 {
5311 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5312 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5313 }
5314
5315 /* A null CS is bad. */
5316 RTSEL NewCS = Idte.Gate.u16Sel;
5317 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5318 {
5319 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5320 return iemRaiseGeneralProtectionFault0(pVCpu);
5321 }
5322
5323 /* Fetch the descriptor for the new CS. */
5324 IEMSELDESC DescCS;
5325 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5326 if (rcStrict != VINF_SUCCESS)
5327 {
5328 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5329 return rcStrict;
5330 }
5331
5332 /* Must be a 64-bit code segment. */
5333 if (!DescCS.Long.Gen.u1DescType)
5334 {
5335 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5336 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5337 }
5338 if ( !DescCS.Long.Gen.u1Long
5339 || DescCS.Long.Gen.u1DefBig
5340 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5341 {
5342 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5343 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5344 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5345 }
5346
5347 /* Don't allow lowering the privilege level. For non-conforming CS
5348 selectors, the CS.DPL sets the privilege level the trap/interrupt
5349 handler runs at. For conforming CS selectors, the CPL remains
5350 unchanged, but the CS.DPL must be <= CPL. */
5351 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5352 * when CPU in Ring-0. Result \#GP? */
5353 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5354 {
5355 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5356 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5357 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5358 }
5359
5360
5361 /* Make sure the selector is present. */
5362 if (!DescCS.Legacy.Gen.u1Present)
5363 {
5364 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5365 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5366 }
5367
5368 /* Check that the new RIP is canonical. */
5369 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5370 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5371 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5372 if (!IEM_IS_CANONICAL(uNewRip))
5373 {
5374 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5375 return iemRaiseGeneralProtectionFault0(pVCpu);
5376 }
5377
5378 /*
5379 * If the privilege level changes or if the IST isn't zero, we need to get
5380 * a new stack from the TSS.
5381 */
5382 uint64_t uNewRsp;
5383 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5384 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5385 if ( uNewCpl != pVCpu->iem.s.uCpl
5386 || Idte.Gate.u3IST != 0)
5387 {
5388 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5389 if (rcStrict != VINF_SUCCESS)
5390 return rcStrict;
5391 }
5392 else
5393 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5394 uNewRsp &= ~(uint64_t)0xf;
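    /* The stack pointer is aligned down to a 16-byte boundary before the frame is built,
       as required for long-mode interrupt delivery. */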
5395
5396 /*
5397 * Calc the flag image to push.
5398 */
5399 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5400 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5401 fEfl &= ~X86_EFL_RF;
5402 else
5403 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5404
5405 /*
5406 * Start making changes.
5407 */
5408 /* Set the new CPL so that stack accesses use it. */
5409 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5410 pVCpu->iem.s.uCpl = uNewCpl;
5411
5412 /* Create the stack frame. */
5413 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
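    /* That is 5 quadwords (RIP, CS, RFLAGS, RSP, SS) = 40 bytes, or 48 bytes when an
       error code is pushed as well. */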
5414 RTPTRUNION uStackFrame;
5415 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5416 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5417 if (rcStrict != VINF_SUCCESS)
5418 return rcStrict;
5419 void * const pvStackFrame = uStackFrame.pv;
5420
5421 if (fFlags & IEM_XCPT_FLAGS_ERR)
5422 *uStackFrame.pu64++ = uErr;
5423 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5424 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5425 uStackFrame.pu64[2] = fEfl;
5426 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5427 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5428 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5429 if (rcStrict != VINF_SUCCESS)
5430 return rcStrict;
5431
5432 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5433 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5434 * after pushing the stack frame? (Write protect the gdt + stack to
5435 * find out.) */
5436 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5437 {
5438 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5439 if (rcStrict != VINF_SUCCESS)
5440 return rcStrict;
5441 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5442 }
5443
5444 /*
5445 * Start committing the register changes.
5446 */
5447 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5448 * hidden registers when interrupting 32-bit or 16-bit code! */
5449 if (uNewCpl != uOldCpl)
5450 {
5451 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5452 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5453 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5454 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5455 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5456 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5457 }
5458 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5459 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5460 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5461 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5462 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5463 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5464 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5465 pVCpu->cpum.GstCtx.rip = uNewRip;
5466
5467 fEfl &= ~fEflToClear;
5468 IEMMISC_SET_EFL(pVCpu, fEfl);
5469
5470 if (fFlags & IEM_XCPT_FLAGS_CR2)
5471 pVCpu->cpum.GstCtx.cr2 = uCr2;
5472
5473 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5474 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5475
5476 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5477}
5478
5479
5480/**
5481 * Implements exceptions and interrupts.
5482 *
5483 * All exceptions and interrupts go through this function!
5484 *
5485 * @returns VBox strict status code.
5486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5487 * @param cbInstr The number of bytes to offset rIP by in the return
5488 * address.
5489 * @param u8Vector The interrupt / exception vector number.
5490 * @param fFlags The flags.
5491 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5492 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5493 */
5494DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5495iemRaiseXcptOrInt(PVMCPU pVCpu,
5496 uint8_t cbInstr,
5497 uint8_t u8Vector,
5498 uint32_t fFlags,
5499 uint16_t uErr,
5500 uint64_t uCr2)
5501{
5502 /*
5503 * Get all the state that we might need here.
5504 */
5505 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5506 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5507
5508#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5509 /*
5510 * Flush prefetch buffer
5511 */
5512 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5513#endif
5514
5515 /*
5516 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5517 */
5518 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5519 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5520 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5521 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5522 {
5523 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5524 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5525 u8Vector = X86_XCPT_GP;
5526 uErr = 0;
5527 }
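    /* E.g. an INT 21h executed in V8086 mode with IOPL < 3 is not delivered through the
       IDT as vector 0x21 but is converted into a #GP(0) here. */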
5528#ifdef DBGFTRACE_ENABLED
5529 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5530 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5531 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5532#endif
5533
5534#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5535 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5536 {
5537 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5538 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5539 return rcStrict0;
5540 }
5541#endif
5542
5543#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5544 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5545 {
5546 /*
5547 * If the event is being injected as part of VMRUN, it isn't subject to event
5548 * intercepts in the nested-guest. However, secondary exceptions that occur
5549 * during injection of any event -are- subject to exception intercepts.
5550 *
5551 * See AMD spec. 15.20 "Event Injection".
5552 */
5553 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5554 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5555 else
5556 {
5557 /*
5558 * Check and handle if the event being raised is intercepted.
5559 */
5560 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5561 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5562 return rcStrict0;
5563 }
5564 }
5565#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5566
5567 /*
5568 * Do recursion accounting.
5569 */
5570 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5571 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5572 if (pVCpu->iem.s.cXcptRecursions == 0)
5573 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5574 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5575 else
5576 {
5577 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5578 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5579 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5580
5581 if (pVCpu->iem.s.cXcptRecursions >= 4)
5582 {
5583#ifdef DEBUG_bird
5584 AssertFailed();
5585#endif
5586 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5587 }
5588
5589 /*
5590 * Evaluate the sequence of recurring events.
5591 */
5592 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5593 NULL /* pXcptRaiseInfo */);
5594 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5595 { /* likely */ }
5596 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5597 {
5598 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5599 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5600 u8Vector = X86_XCPT_DF;
5601 uErr = 0;
5602 /** @todo NSTVMX: Do we need to do something here for VMX? */
5603 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5604 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5605 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5606 }
5607 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5608 {
5609 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5610 return iemInitiateCpuShutdown(pVCpu);
5611 }
5612 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5613 {
5614 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5615 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5616 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5617 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5618 return VERR_EM_GUEST_CPU_HANG;
5619 }
5620 else
5621 {
5622 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5623 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5624 return VERR_IEM_IPE_9;
5625 }
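        /* For example, a #PF raised while delivering an earlier #PF resolves to
           IEMXCPTRAISE_DOUBLE_FAULT above, and a further fault while delivering that
           #DF takes the triple fault / shutdown path. */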
5626
5627 /*
5628 * The 'EXT' bit is set when an exception occurs during delivery of an external
5629 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5630 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5631 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set.
5632 *
5633 * [1] - Intel spec. 6.13 "Error Code"
5634 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5635 * [3] - Intel Instruction reference for INT n.
5636 */
5637 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5638 && (fFlags & IEM_XCPT_FLAGS_ERR)
5639 && u8Vector != X86_XCPT_PF
5640 && u8Vector != X86_XCPT_DF)
5641 {
5642 uErr |= X86_TRAP_ERR_EXTERNAL;
5643 }
5644 }
5645
5646 pVCpu->iem.s.cXcptRecursions++;
5647 pVCpu->iem.s.uCurXcpt = u8Vector;
5648 pVCpu->iem.s.fCurXcpt = fFlags;
5649 pVCpu->iem.s.uCurXcptErr = uErr;
5650 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5651
5652 /*
5653 * Extensive logging.
5654 */
5655#if defined(LOG_ENABLED) && defined(IN_RING3)
5656 if (LogIs3Enabled())
5657 {
5658 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5659 PVM pVM = pVCpu->CTX_SUFF(pVM);
5660 char szRegs[4096];
5661 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5662 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5663 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5664 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5665 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5666 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5667 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5668 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5669 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5670 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5671 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5672 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5673 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5674 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5675 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5676 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5677 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5678 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5679 " efer=%016VR{efer}\n"
5680 " pat=%016VR{pat}\n"
5681 " sf_mask=%016VR{sf_mask}\n"
5682 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5683 " lstar=%016VR{lstar}\n"
5684 " star=%016VR{star} cstar=%016VR{cstar}\n"
5685 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5686 );
5687
5688 char szInstr[256];
5689 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5690 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5691 szInstr, sizeof(szInstr), NULL);
5692 Log3(("%s%s\n", szRegs, szInstr));
5693 }
5694#endif /* LOG_ENABLED */
5695
5696 /*
5697 * Call the mode specific worker function.
5698 */
5699 VBOXSTRICTRC rcStrict;
5700 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5701 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5702 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5703 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5704 else
5705 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5706
5707 /* Flush the prefetch buffer. */
5708#ifdef IEM_WITH_CODE_TLB
5709 pVCpu->iem.s.pbInstrBuf = NULL;
5710#else
5711 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5712#endif
5713
5714 /*
5715 * Unwind.
5716 */
5717 pVCpu->iem.s.cXcptRecursions--;
5718 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5719 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5720 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5721 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5722 pVCpu->iem.s.cXcptRecursions + 1));
5723 return rcStrict;
5724}
5725
5726#ifdef IEM_WITH_SETJMP
5727/**
5728 * See iemRaiseXcptOrInt. Will not return.
5729 */
5730IEM_STATIC DECL_NO_RETURN(void)
5731iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5732 uint8_t cbInstr,
5733 uint8_t u8Vector,
5734 uint32_t fFlags,
5735 uint16_t uErr,
5736 uint64_t uCr2)
5737{
5738 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5739 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5740}
5741#endif
5742
5743
5744/** \#DE - 00. */
5745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5746{
5747 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5748}
5749
5750
5751/** \#DB - 01.
5752 * @note This automatically clears DR7.GD. */
5753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5754{
5755 /** @todo set/clear RF. */
5756 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5758}
5759
5760
5761/** \#BR - 05. */
5762DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5763{
5764 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5765}
5766
5767
5768/** \#UD - 06. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5772}
5773
5774
5775/** \#NM - 07. */
5776DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5777{
5778 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5779}
5780
5781
5782/** \#TS(err) - 0a. */
5783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5784{
5785 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5786}
5787
5788
5789/** \#TS(tr) - 0a. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5791{
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5793 pVCpu->cpum.GstCtx.tr.Sel, 0);
5794}
5795
5796
5797/** \#TS(0) - 0a. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5799{
5800 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5801 0, 0);
5802}
5803
5804
5805/** \#TS(err) - 0a. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5807{
5808 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5809 uSel & X86_SEL_MASK_OFF_RPL, 0);
5810}
5811
5812
5813/** \#NP(err) - 0b. */
5814DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5815{
5816 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5817}
5818
5819
5820/** \#NP(sel) - 0b. */
5821DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5822{
5823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5824 uSel & ~X86_SEL_RPL, 0);
5825}
5826
5827
5828/** \#SS(seg) - 0c. */
5829DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5830{
5831 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5832 uSel & ~X86_SEL_RPL, 0);
5833}
5834
5835
5836/** \#SS(err) - 0c. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5838{
5839 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5840}
5841
5842
5843/** \#GP(n) - 0d. */
5844DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5845{
5846 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5847}
5848
5849
5850/** \#GP(0) - 0d. */
5851DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5852{
5853 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5854}
5855
5856#ifdef IEM_WITH_SETJMP
5857/** \#GP(0) - 0d. */
5858DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5859{
5860 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5861}
5862#endif
5863
5864
5865/** \#GP(sel) - 0d. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5867{
5868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5869 Sel & ~X86_SEL_RPL, 0);
5870}
5871
5872
5873/** \#GP(0) - 0d. */
5874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5875{
5876 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5877}
5878
5879
5880/** \#GP(sel) - 0d. */
5881DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5882{
5883 NOREF(iSegReg); NOREF(fAccess);
5884 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5885 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5886}
5887
5888#ifdef IEM_WITH_SETJMP
5889/** \#GP(sel) - 0d, longjmp. */
5890DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5891{
5892 NOREF(iSegReg); NOREF(fAccess);
5893 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5894 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5895}
5896#endif
5897
5898/** \#GP(sel) - 0d. */
5899DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5900{
5901 NOREF(Sel);
5902 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5903}
5904
5905#ifdef IEM_WITH_SETJMP
5906/** \#GP(sel) - 0d, longjmp. */
5907DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5908{
5909 NOREF(Sel);
5910 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5911}
5912#endif
5913
5914
5915/** \#GP(sel) - 0d. */
5916DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5917{
5918 NOREF(iSegReg); NOREF(fAccess);
5919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5920}
5921
5922#ifdef IEM_WITH_SETJMP
5923/** \#GP(sel) - 0d, longjmp. */
5924DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5925 uint32_t fAccess)
5926{
5927 NOREF(iSegReg); NOREF(fAccess);
5928 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5929}
5930#endif
5931
5932
5933/** \#PF(n) - 0e. */
5934DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5935{
5936 uint16_t uErr;
5937 switch (rc)
5938 {
5939 case VERR_PAGE_NOT_PRESENT:
5940 case VERR_PAGE_TABLE_NOT_PRESENT:
5941 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5942 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5943 uErr = 0;
5944 break;
5945
5946 default:
5947 AssertMsgFailed(("%Rrc\n", rc));
5948 RT_FALL_THRU();
5949 case VERR_ACCESS_DENIED:
5950 uErr = X86_TRAP_PF_P;
5951 break;
5952
5953 /** @todo reserved */
5954 }
5955
5956 if (pVCpu->iem.s.uCpl == 3)
5957 uErr |= X86_TRAP_PF_US;
5958
5959 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5960 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5961 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5962 uErr |= X86_TRAP_PF_ID;
5963
5964#if 0 /* This is so much non-sense, really. Why was it done like that? */
5965 /* Note! RW access callers reporting a WRITE protection fault, will clear
5966 the READ flag before calling. So, read-modify-write accesses (RW)
5967 can safely be reported as READ faults. */
5968 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5969 uErr |= X86_TRAP_PF_RW;
5970#else
5971 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5972 {
5973 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5974 uErr |= X86_TRAP_PF_RW;
5975 }
5976#endif
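    /* Example error codes produced here: a ring-3 write that hits a present, read-only
       page yields P|RW|US = 0x7; an instruction fetch with PAE/NXE enabled additionally
       sets the ID bit (0x10). */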
5977
5978 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5979 uErr, GCPtrWhere);
5980}
5981
5982#ifdef IEM_WITH_SETJMP
5983/** \#PF(n) - 0e, longjmp. */
5984IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5985{
5986 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5987}
5988#endif
5989
5990
5991/** \#MF(0) - 10. */
5992DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5993{
5994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5995}
5996
5997
5998/** \#AC(0) - 11. */
5999DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6000{
6001 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6002}
6003
6004
6005/**
6006 * Macro for calling iemCImplRaiseDivideError().
6007 *
6008 * This enables us to add/remove arguments and force different levels of
6009 * inlining as we wish.
6010 *
6011 * @return Strict VBox status code.
6012 */
6013#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6014IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6015{
6016 NOREF(cbInstr);
6017 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6018}
6019
6020
6021/**
6022 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6023 *
6024 * This enables us to add/remove arguments and force different levels of
6025 * inlining as we wish.
6026 *
6027 * @return Strict VBox status code.
6028 */
6029#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6030IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6031{
6032 NOREF(cbInstr);
6033 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6034}
6035
6036
6037/**
6038 * Macro for calling iemCImplRaiseInvalidOpcode().
6039 *
6040 * This enables us to add/remove arguments and force different levels of
6041 * inlining as we wish.
6042 *
6043 * @return Strict VBox status code.
6044 */
6045#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6046IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6047{
6048 NOREF(cbInstr);
6049 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6050}
6051
6052
6053/** @} */
6054
6055
6056/*
6057 *
6058 * Helper routines.
6059 * Helper routines.
6060 * Helper routines.
6061 *
6062 */
6063
6064/**
6065 * Recalculates the effective operand size.
6066 *
6067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6068 */
6069IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6070{
6071 switch (pVCpu->iem.s.enmCpuMode)
6072 {
6073 case IEMMODE_16BIT:
6074 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6075 break;
6076 case IEMMODE_32BIT:
6077 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6078 break;
6079 case IEMMODE_64BIT:
6080 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6081 {
6082 case 0:
6083 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6084 break;
6085 case IEM_OP_PRF_SIZE_OP:
6086 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6087 break;
6088 case IEM_OP_PRF_SIZE_REX_W:
6089 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6090 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6091 break;
6092 }
6093 break;
6094 default:
6095 AssertFailed();
6096 }
6097}
6098
6099
6100/**
6101 * Sets the default operand size to 64-bit and recalculates the effective
6102 * operand size.
6103 *
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 */
6106IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6107{
6108 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6109 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
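    /* REX.W takes precedence over the 0x66 operand-size prefix in 64-bit mode;
       only a lone 0x66 prefix drops the effective operand size to 16-bit. */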
6110 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6111 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6112 else
6113 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6114}
6115
6116
6117/*
6118 *
6119 * Common opcode decoders.
6120 * Common opcode decoders.
6121 * Common opcode decoders.
6122 *
6123 */
6124//#include <iprt/mem.h>
6125
6126/**
6127 * Used to add extra details about a stub case.
6128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6129 */
6130IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6131{
6132#if defined(LOG_ENABLED) && defined(IN_RING3)
6133 PVM pVM = pVCpu->CTX_SUFF(pVM);
6134 char szRegs[4096];
6135 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6136 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6137 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6138 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6139 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6140 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6141 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6142 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6143 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6144 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6145 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6146 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6147 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6148 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6149 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6150 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6151 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6152 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6153 " efer=%016VR{efer}\n"
6154 " pat=%016VR{pat}\n"
6155 " sf_mask=%016VR{sf_mask}\n"
6156 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6157 " lstar=%016VR{lstar}\n"
6158 " star=%016VR{star} cstar=%016VR{cstar}\n"
6159 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6160 );
6161
6162 char szInstr[256];
6163 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6164 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6165 szInstr, sizeof(szInstr), NULL);
6166
6167 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6168#else
6169    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6170#endif
6171}
6172
6173/**
6174 * Complains about a stub.
6175 *
6176 * Two versions of this macro are provided: one for daily use and one for use
6177 * when working on IEM.
6178 */
6179#if 0
6180# define IEMOP_BITCH_ABOUT_STUB() \
6181 do { \
6182 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6183 iemOpStubMsg2(pVCpu); \
6184 RTAssertPanic(); \
6185 } while (0)
6186#else
6187# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6188#endif
6189
6190/** Stubs an opcode. */
6191#define FNIEMOP_STUB(a_Name) \
6192 FNIEMOP_DEF(a_Name) \
6193 { \
6194 RT_NOREF_PV(pVCpu); \
6195 IEMOP_BITCH_ABOUT_STUB(); \
6196 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6197 } \
6198 typedef int ignore_semicolon
6199
6200/** Stubs an opcode. */
6201#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6202 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6203 { \
6204 RT_NOREF_PV(pVCpu); \
6205 RT_NOREF_PV(a_Name0); \
6206 IEMOP_BITCH_ABOUT_STUB(); \
6207 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6208 } \
6209 typedef int ignore_semicolon
6210
6211/** Stubs an opcode which currently should raise \#UD. */
6212#define FNIEMOP_UD_STUB(a_Name) \
6213 FNIEMOP_DEF(a_Name) \
6214 { \
6215 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6216 return IEMOP_RAISE_INVALID_OPCODE(); \
6217 } \
6218 typedef int ignore_semicolon
6219
6220/** Stubs an opcode which currently should raise \#UD. */
6221#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6222 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6223 { \
6224 RT_NOREF_PV(pVCpu); \
6225 RT_NOREF_PV(a_Name0); \
6226 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6227 return IEMOP_RAISE_INVALID_OPCODE(); \
6228 } \
6229 typedef int ignore_semicolon
6230
6231
6232
6233/** @name Register Access.
6234 * @{
6235 */
6236
6237/**
6238 * Gets a reference (pointer) to the specified hidden segment register.
6239 *
6240 * @returns Hidden register reference.
6241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6242 * @param iSegReg The segment register.
6243 */
6244IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6245{
6246 Assert(iSegReg < X86_SREG_COUNT);
6247 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6248 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6249
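    /* In raw-mode context the hidden selector fields may be stale and are loaded
       lazily; in all other contexts they are expected to be valid already. */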
6250#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6251 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6252 { /* likely */ }
6253 else
6254 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6255#else
6256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6257#endif
6258 return pSReg;
6259}
6260
6261
6262/**
6263 * Ensures that the given hidden segment register is up to date.
6264 *
6265 * @returns Hidden register reference.
6266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6267 * @param pSReg The segment register.
6268 */
6269IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6270{
6271#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6272 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6273 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6274#else
6275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6276 NOREF(pVCpu);
6277#endif
6278 return pSReg;
6279}
6280
6281
6282/**
6283 * Gets a reference (pointer) to the specified segment register (the selector
6284 * value).
6285 *
6286 * @returns Pointer to the selector variable.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param iSegReg The segment register.
6289 */
6290DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6291{
6292 Assert(iSegReg < X86_SREG_COUNT);
6293 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6294 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6295}
6296
6297
6298/**
6299 * Fetches the selector value of a segment register.
6300 *
6301 * @returns The selector value.
6302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6303 * @param iSegReg The segment register.
6304 */
6305DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6306{
6307 Assert(iSegReg < X86_SREG_COUNT);
6308 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6309 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6310}
6311
6312
6313/**
6314 * Fetches the base address value of a segment register.
6315 *
6316 * @returns The segment base address.
6317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6318 * @param iSegReg The segment register.
6319 */
6320DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6321{
6322 Assert(iSegReg < X86_SREG_COUNT);
6323 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6324 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6325}
6326
6327
6328/**
6329 * Gets a reference (pointer) to the specified general purpose register.
6330 *
6331 * @returns Register reference.
6332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6333 * @param iReg The general purpose register.
6334 */
6335DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6336{
6337 Assert(iReg < 16);
6338 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6339}
6340
6341
6342/**
6343 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6344 *
6345 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6346 *
6347 * @returns Register reference.
6348 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6349 * @param iReg The register.
6350 */
6351DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6352{
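    /* With any REX prefix, indexes 4-7 address SPL/BPL/SIL/DIL (and 8-15 the new
       GPRs); without one they select the legacy high byte registers AH/CH/DH/BH. */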
6353 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6354 {
6355 Assert(iReg < 16);
6356 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6357 }
6358 /* high 8-bit register. */
6359 Assert(iReg < 8);
6360 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6361}
6362
6363
6364/**
6365 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6366 *
6367 * @returns Register reference.
6368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6369 * @param iReg The register.
6370 */
6371DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6372{
6373 Assert(iReg < 16);
6374 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6375}
6376
6377
6378/**
6379 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6380 *
6381 * @returns Register reference.
6382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6383 * @param iReg The register.
6384 */
6385DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6386{
6387 Assert(iReg < 16);
6388 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6389}
6390
6391
6392/**
6393 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6394 *
6395 * @returns Register reference.
6396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6397 * @param iReg The register.
6398 */
6399DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6400{
6401    Assert(iReg < 16);
6402 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6403}
6404
6405
6406/**
6407 * Gets a reference (pointer) to the specified segment register's base address.
6408 *
6409 * @returns Segment register base address reference.
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 * @param iSegReg The segment selector.
6412 */
6413DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6414{
6415 Assert(iSegReg < X86_SREG_COUNT);
6416 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6417 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6418}
6419
6420
6421/**
6422 * Fetches the value of an 8-bit general purpose register.
6423 *
6424 * @returns The register value.
6425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6426 * @param iReg The register.
6427 */
6428DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6429{
6430 return *iemGRegRefU8(pVCpu, iReg);
6431}
6432
6433
6434/**
6435 * Fetches the value of a 16-bit general purpose register.
6436 *
6437 * @returns The register value.
6438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6439 * @param iReg The register.
6440 */
6441DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6442{
6443 Assert(iReg < 16);
6444 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6445}
6446
6447
6448/**
6449 * Fetches the value of a 32-bit general purpose register.
6450 *
6451 * @returns The register value.
6452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6453 * @param iReg The register.
6454 */
6455DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6456{
6457 Assert(iReg < 16);
6458 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6459}
6460
6461
6462/**
6463 * Fetches the value of a 64-bit general purpose register.
6464 *
6465 * @returns The register value.
6466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6467 * @param iReg The register.
6468 */
6469DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6470{
6471 Assert(iReg < 16);
6472 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6473}
6474
6475
6476/**
6477 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6478 *
6479 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6480 * segment limit.
6481 *
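 * @returns Strict VBox status code.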
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param offNextInstr The offset of the next instruction.
6484 */
6485IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6486{
6487 switch (pVCpu->iem.s.enmEffOpSize)
6488 {
6489 case IEMMODE_16BIT:
6490 {
6491 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6492 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6493 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6494 return iemRaiseGeneralProtectionFault0(pVCpu);
6495 pVCpu->cpum.GstCtx.rip = uNewIp;
6496 break;
6497 }
6498
6499 case IEMMODE_32BIT:
6500 {
6501 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6502 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6503
6504 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6505 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6506 return iemRaiseGeneralProtectionFault0(pVCpu);
6507 pVCpu->cpum.GstCtx.rip = uNewEip;
6508 break;
6509 }
6510
6511 case IEMMODE_64BIT:
6512 {
6513 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6514
6515 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6516 if (!IEM_IS_CANONICAL(uNewRip))
6517 return iemRaiseGeneralProtectionFault0(pVCpu);
6518 pVCpu->cpum.GstCtx.rip = uNewRip;
6519 break;
6520 }
6521
6522 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6523 }
6524
6525 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6526
6527#ifndef IEM_WITH_CODE_TLB
6528 /* Flush the prefetch buffer. */
6529 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6530#endif
6531
6532 return VINF_SUCCESS;
6533}
6534
6535
6536/**
6537 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6538 *
6539 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6540 * segment limit.
6541 *
6542 * @returns Strict VBox status code.
6543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6544 * @param offNextInstr The offset of the next instruction.
6545 */
6546IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6547{
6548 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6549
6550 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6551 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6552 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6553 return iemRaiseGeneralProtectionFault0(pVCpu);
6554 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6555 pVCpu->cpum.GstCtx.rip = uNewIp;
6556 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6557
6558#ifndef IEM_WITH_CODE_TLB
6559 /* Flush the prefetch buffer. */
6560 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6561#endif
6562
6563 return VINF_SUCCESS;
6564}
6565
6566
6567/**
6568 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6569 *
6570 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6571 * segment limit.
6572 *
6573 * @returns Strict VBox status code.
6574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6575 * @param offNextInstr The offset of the next instruction.
6576 */
6577IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6578{
6579 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6580
6581 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6582 {
6583 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6584
6585 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6586 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6587 return iemRaiseGeneralProtectionFault0(pVCpu);
6588 pVCpu->cpum.GstCtx.rip = uNewEip;
6589 }
6590 else
6591 {
6592 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6593
6594 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6595 if (!IEM_IS_CANONICAL(uNewRip))
6596 return iemRaiseGeneralProtectionFault0(pVCpu);
6597 pVCpu->cpum.GstCtx.rip = uNewRip;
6598 }
6599 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6600
6601#ifndef IEM_WITH_CODE_TLB
6602 /* Flush the prefetch buffer. */
6603 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6604#endif
6605
6606 return VINF_SUCCESS;
6607}
6608
6609
6610/**
6611 * Performs a near jump to the specified address.
6612 *
6613 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6614 * segment limit.
6615 *
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 * @param uNewRip The new RIP value.
6618 */
6619IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6620{
6621 switch (pVCpu->iem.s.enmEffOpSize)
6622 {
6623 case IEMMODE_16BIT:
6624 {
6625 Assert(uNewRip <= UINT16_MAX);
6626 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6627 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6628 return iemRaiseGeneralProtectionFault0(pVCpu);
6629 /** @todo Test 16-bit jump in 64-bit mode. */
6630 pVCpu->cpum.GstCtx.rip = uNewRip;
6631 break;
6632 }
6633
6634 case IEMMODE_32BIT:
6635 {
6636 Assert(uNewRip <= UINT32_MAX);
6637 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6638 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6639
6640 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6641 return iemRaiseGeneralProtectionFault0(pVCpu);
6642 pVCpu->cpum.GstCtx.rip = uNewRip;
6643 break;
6644 }
6645
6646 case IEMMODE_64BIT:
6647 {
6648 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6649
6650 if (!IEM_IS_CANONICAL(uNewRip))
6651 return iemRaiseGeneralProtectionFault0(pVCpu);
6652 pVCpu->cpum.GstCtx.rip = uNewRip;
6653 break;
6654 }
6655
6656 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6657 }
6658
6659 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6660
6661#ifndef IEM_WITH_CODE_TLB
6662 /* Flush the prefetch buffer. */
6663 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6664#endif
6665
6666 return VINF_SUCCESS;
6667}
6668
6669
6670/**
6671 * Gets the address of the top of the stack.
6672 *
6673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6674 */
6675DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6676{
6677 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6678 return pVCpu->cpum.GstCtx.rsp;
6679 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6680 return pVCpu->cpum.GstCtx.esp;
6681 return pVCpu->cpum.GstCtx.sp;
6682}
6683
6684
6685/**
6686 * Updates the RIP/EIP/IP to point to the next instruction.
6687 *
6688 * This function leaves the EFLAGS.RF flag alone.
6689 *
6690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6691 * @param cbInstr The number of bytes to add.
6692 */
6693IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6694{
6695 switch (pVCpu->iem.s.enmCpuMode)
6696 {
6697 case IEMMODE_16BIT:
6698 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
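            /* Advance IP and let it wrap at 64K by masking the result to 16 bits. */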
6699 pVCpu->cpum.GstCtx.eip += cbInstr;
6700 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6701 break;
6702
6703 case IEMMODE_32BIT:
6704 pVCpu->cpum.GstCtx.eip += cbInstr;
6705 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6706 break;
6707
6708 case IEMMODE_64BIT:
6709 pVCpu->cpum.GstCtx.rip += cbInstr;
6710 break;
6711 default: AssertFailed();
6712 }
6713}
6714
6715
6716#if 0
6717/**
6718 * Updates the RIP/EIP/IP to point to the next instruction.
6719 *
6720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6721 */
6722IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6723{
6724 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6725}
6726#endif
6727
6728
6729
6730/**
6731 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6732 *
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 * @param cbInstr The number of bytes to add.
6735 */
6736IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6737{
6738 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6739
6740 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6741#if ARCH_BITS >= 64
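    /* Branch-free update: the new RIP is masked to 32 bits for 16/32-bit modes
       and kept at the full 64 bits in long mode. */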
6742 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6743 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6744 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6745#else
6746 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6747 pVCpu->cpum.GstCtx.rip += cbInstr;
6748 else
6749 pVCpu->cpum.GstCtx.eip += cbInstr;
6750#endif
6751}
6752
6753
6754/**
6755 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6756 *
6757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6758 */
6759IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6760{
6761 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6762}
6763
6764
6765/**
6766 * Adds to the stack pointer.
6767 *
6768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6769 * @param cbToAdd The number of bytes to add (8-bit!).
6770 */
6771DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6772{
6773 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6774 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6775 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6776 pVCpu->cpum.GstCtx.esp += cbToAdd;
6777 else
6778 pVCpu->cpum.GstCtx.sp += cbToAdd;
6779}
6780
6781
6782/**
6783 * Subtracts from the stack pointer.
6784 *
6785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6786 * @param cbToSub The number of bytes to subtract (8-bit!).
6787 */
6788DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6789{
6790 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6791 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6792 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6793 pVCpu->cpum.GstCtx.esp -= cbToSub;
6794 else
6795 pVCpu->cpum.GstCtx.sp -= cbToSub;
6796}
6797
6798
6799/**
6800 * Adds to the temporary stack pointer.
6801 *
6802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6803 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6804 * @param cbToAdd The number of bytes to add (16-bit).
6805 */
6806DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6807{
6808 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6809 pTmpRsp->u += cbToAdd;
6810 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6811 pTmpRsp->DWords.dw0 += cbToAdd;
6812 else
6813 pTmpRsp->Words.w0 += cbToAdd;
6814}
6815
6816
6817/**
6818 * Subtracts from the temporary stack pointer.
6819 *
6820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6821 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6822 * @param cbToSub The number of bytes to subtract.
6823 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6824 * expecting that.
6825 */
6826DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6827{
6828 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6829 pTmpRsp->u -= cbToSub;
6830 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6831 pTmpRsp->DWords.dw0 -= cbToSub;
6832 else
6833 pTmpRsp->Words.w0 -= cbToSub;
6834}
6835
6836
6837/**
6838 * Calculates the effective stack address for a push of the specified size as
6839 * well as the new RSP value (upper bits may be masked).
6840 *
6841 * @returns Effective stack address for the push.
6842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6843 * @param cbItem The size of the stack item to push.
6844 * @param puNewRsp Where to return the new RSP value.
6845 */
6846DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6847{
6848 RTUINT64U uTmpRsp;
6849 RTGCPTR GCPtrTop;
6850 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6851
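    /* The significant width of the stack pointer depends on the mode: 64-bit
       code uses RSP, a big (32-bit) SS uses ESP, otherwise only SP. */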
6852 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6853 GCPtrTop = uTmpRsp.u -= cbItem;
6854 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6855 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6856 else
6857 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6858 *puNewRsp = uTmpRsp.u;
6859 return GCPtrTop;
6860}
6861
6862
6863/**
6864 * Gets the current stack pointer and calculates the value after a pop of the
6865 * specified size.
6866 *
6867 * @returns Current stack pointer.
6868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6869 * @param cbItem The size of the stack item to pop.
6870 * @param puNewRsp Where to return the new RSP value.
6871 */
6872DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6873{
6874 RTUINT64U uTmpRsp;
6875 RTGCPTR GCPtrTop;
6876 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6877
6878 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6879 {
6880 GCPtrTop = uTmpRsp.u;
6881 uTmpRsp.u += cbItem;
6882 }
6883 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6884 {
6885 GCPtrTop = uTmpRsp.DWords.dw0;
6886 uTmpRsp.DWords.dw0 += cbItem;
6887 }
6888 else
6889 {
6890 GCPtrTop = uTmpRsp.Words.w0;
6891 uTmpRsp.Words.w0 += cbItem;
6892 }
6893 *puNewRsp = uTmpRsp.u;
6894 return GCPtrTop;
6895}
6896
6897
6898/**
6899 * Calculates the effective stack address for a push of the specified size as
6900 * well as the new temporary RSP value (upper bits may be masked).
6901 *
6902 * @returns Effective stack address for the push.
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 * @param pTmpRsp The temporary stack pointer. This is updated.
6905 * @param cbItem The size of the stack item to push.
6906 */
6907DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6908{
6909 RTGCPTR GCPtrTop;
6910
6911 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6912 GCPtrTop = pTmpRsp->u -= cbItem;
6913 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6914 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6915 else
6916 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6917 return GCPtrTop;
6918}
6919
6920
6921/**
6922 * Gets the effective stack address for a pop of the specified size and
6923 * calculates and updates the temporary RSP.
6924 *
6925 * @returns Current stack pointer.
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 * @param pTmpRsp The temporary stack pointer. This is updated.
6928 * @param cbItem The size of the stack item to pop.
6929 */
6930DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6931{
6932 RTGCPTR GCPtrTop;
6933 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6934 {
6935 GCPtrTop = pTmpRsp->u;
6936 pTmpRsp->u += cbItem;
6937 }
6938 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6939 {
6940 GCPtrTop = pTmpRsp->DWords.dw0;
6941 pTmpRsp->DWords.dw0 += cbItem;
6942 }
6943 else
6944 {
6945 GCPtrTop = pTmpRsp->Words.w0;
6946 pTmpRsp->Words.w0 += cbItem;
6947 }
6948 return GCPtrTop;
6949}
6950
6951/** @} */
6952
6953
6954/** @name FPU access and helpers.
6955 *
6956 * @{
6957 */
6958
6959
6960/**
6961 * Hook for preparing to use the host FPU.
6962 *
6963 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6964 *
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 */
6967DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6968{
6969#ifdef IN_RING3
6970 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6971#else
6972 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6973#endif
6974 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6975}
6976
6977
6978/**
6979 * Hook for preparing to use the host FPU for SSE.
6980 *
6981 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6982 *
6983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6984 */
6985DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6986{
6987 iemFpuPrepareUsage(pVCpu);
6988}
6989
6990
6991/**
6992 * Hook for preparing to use the host FPU for AVX.
6993 *
6994 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6995 *
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 */
6998DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6999{
7000 iemFpuPrepareUsage(pVCpu);
7001}
7002
7003
7004/**
7005 * Hook for actualizing the guest FPU state before the interpreter reads it.
7006 *
7007 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7008 *
7009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7010 */
7011DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7012{
7013#ifdef IN_RING3
7014 NOREF(pVCpu);
7015#else
7016 CPUMRZFpuStateActualizeForRead(pVCpu);
7017#endif
7018 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7019}
7020
7021
7022/**
7023 * Hook for actualizing the guest FPU state before the interpreter changes it.
7024 *
7025 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7026 *
7027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7028 */
7029DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7030{
7031#ifdef IN_RING3
7032 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7033#else
7034 CPUMRZFpuStateActualizeForChange(pVCpu);
7035#endif
7036 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7037}
7038
7039
7040/**
7041 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7042 * only.
7043 *
7044 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 */
7048DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7049{
7050#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7051 NOREF(pVCpu);
7052#else
7053 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7054#endif
7055 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7056}
7057
7058
7059/**
7060 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7061 * read+write.
7062 *
7063 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7064 *
7065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7066 */
7067DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7068{
7069#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7070 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7071#else
7072 CPUMRZFpuStateActualizeForChange(pVCpu);
7073#endif
7074 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7075}
7076
7077
7078/**
7079 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7080 * only.
7081 *
7082 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7083 *
7084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7085 */
7086DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7087{
7088#ifdef IN_RING3
7089 NOREF(pVCpu);
7090#else
7091 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7092#endif
7093 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7094}
7095
7096
7097/**
7098 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7099 * read+write.
7100 *
7101 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7102 *
7103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7104 */
7105DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7106{
7107#ifdef IN_RING3
7108 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7109#else
7110 CPUMRZFpuStateActualizeForChange(pVCpu);
7111#endif
7112 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7113}
7114
7115
7116/**
7117 * Stores a QNaN value into a FPU register.
7118 *
7119 * @param pReg Pointer to the register.
7120 */
7121DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7122{
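    /* The x87 'real indefinite' QNaN: sign=1, exponent=0x7fff, fraction=0xC000000000000000. */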
7123 pReg->au32[0] = UINT32_C(0x00000000);
7124 pReg->au32[1] = UINT32_C(0xc0000000);
7125 pReg->au16[4] = UINT16_C(0xffff);
7126}
7127
7128
7129/**
7130 * Updates the FOP, FPU.CS and FPUIP registers.
7131 *
7132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7133 * @param pFpuCtx The FPU context.
7134 */
7135DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7136{
7137 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7138 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7139    /** @todo x87.CS and FPUIP need to be kept separately. */
7140 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7141 {
7142 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7143 * happens in real mode here based on the fnsave and fnstenv images. */
7144 pFpuCtx->CS = 0;
7145 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7146 }
7147 else
7148 {
7149 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7150 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7151 }
7152}
7153
7154
7155/**
7156 * Updates the x87.DS and FPUDP registers.
7157 *
7158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7159 * @param pFpuCtx The FPU context.
7160 * @param iEffSeg The effective segment register.
7161 * @param GCPtrEff The effective address relative to @a iEffSeg.
7162 */
7163DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7164{
7165 RTSEL sel;
7166 switch (iEffSeg)
7167 {
7168 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7169 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7170 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7171 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7172 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7173 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7174 default:
7175 AssertMsgFailed(("%d\n", iEffSeg));
7176 sel = pVCpu->cpum.GstCtx.ds.Sel;
7177 }
7178    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7179 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7180 {
7181 pFpuCtx->DS = 0;
7182 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7183 }
7184 else
7185 {
7186 pFpuCtx->DS = sel;
7187 pFpuCtx->FPUDP = GCPtrEff;
7188 }
7189}
7190
7191
7192/**
7193 * Rotates the stack registers in the push direction.
7194 *
7195 * @param pFpuCtx The FPU context.
7196 * @remarks This is a complete waste of time, but fxsave stores the registers in
7197 * stack order.
7198 */
7199DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7200{
7201 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7202 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7203 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7204 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7205 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7206 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7207 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7208 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7209 pFpuCtx->aRegs[0].r80 = r80Tmp;
7210}
7211
7212
7213/**
7214 * Rotates the stack registers in the pop direction.
7215 *
7216 * @param pFpuCtx The FPU context.
7217 * @remarks This is a complete waste of time, but fxsave stores the registers in
7218 * stack order.
7219 */
7220DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7221{
7222 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7223 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7224 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7225 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7226 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7227 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7228 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7229 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7230 pFpuCtx->aRegs[7].r80 = r80Tmp;
7231}
7232
7233
7234/**
7235 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7236 * exception prevents it.
7237 *
7238 * @param pResult The FPU operation result to push.
7239 * @param pFpuCtx The FPU context.
7240 */
7241IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7242{
7243 /* Update FSW and bail if there are pending exceptions afterwards. */
7244 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7245 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7246 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7247 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7248 {
7249 pFpuCtx->FSW = fFsw;
7250 return;
7251 }
7252
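    /* A push decrements TOP; adding 7 and masking is the same as subtracting 1 modulo 8. */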
7253 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7254 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7255 {
7256 /* All is fine, push the actual value. */
7257 pFpuCtx->FTW |= RT_BIT(iNewTop);
7258 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7259 }
7260 else if (pFpuCtx->FCW & X86_FCW_IM)
7261 {
7262 /* Masked stack overflow, push QNaN. */
7263 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7264 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7265 }
7266 else
7267 {
7268 /* Raise stack overflow, don't push anything. */
7269 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7270 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7271 return;
7272 }
7273
7274 fFsw &= ~X86_FSW_TOP_MASK;
7275 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7276 pFpuCtx->FSW = fFsw;
7277
7278 iemFpuRotateStackPush(pFpuCtx);
7279}
7280
7281
7282/**
7283 * Stores a result in a FPU register and updates the FSW and FTW.
7284 *
7285 * @param pFpuCtx The FPU context.
7286 * @param pResult The result to store.
7287 * @param iStReg Which FPU register to store it in.
7288 */
7289IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7290{
7291 Assert(iStReg < 8);
7292 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7293 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7294 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7295 pFpuCtx->FTW |= RT_BIT(iReg);
7296 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7297}
7298
7299
7300/**
7301 * Only updates the FPU status word (FSW) with the result of the current
7302 * instruction.
7303 *
7304 * @param pFpuCtx The FPU context.
7305 * @param u16FSW The FSW output of the current instruction.
7306 */
7307IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7308{
7309 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7310 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7311}
7312
7313
7314/**
7315 * Pops one item off the FPU stack if no pending exception prevents it.
7316 *
7317 * @param pFpuCtx The FPU context.
7318 */
7319IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7320{
7321 /* Check pending exceptions. */
7322 uint16_t uFSW = pFpuCtx->FSW;
7323 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7324 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7325 return;
7326
7327    /* TOP++ - adding 9 to the TOP field is +1 modulo 8, i.e. one item is popped. */
7328 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7329 uFSW &= ~X86_FSW_TOP_MASK;
7330 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7331 pFpuCtx->FSW = uFSW;
7332
7333 /* Mark the previous ST0 as empty. */
7334 iOldTop >>= X86_FSW_TOP_SHIFT;
7335 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7336
7337 /* Rotate the registers. */
7338 iemFpuRotateStackPop(pFpuCtx);
7339}
7340
7341
7342/**
7343 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7344 *
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param pResult The FPU operation result to push.
7347 */
7348IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7349{
7350 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7351 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7352 iemFpuMaybePushResult(pResult, pFpuCtx);
7353}
7354
7355
7356/**
7357 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7358 * and sets FPUDP and FPUDS.
7359 *
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param pResult The FPU operation result to push.
7362 * @param iEffSeg The effective segment register.
7363 * @param GCPtrEff The effective address relative to @a iEffSeg.
7364 */
7365IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7366{
7367 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7368 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7369 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7370 iemFpuMaybePushResult(pResult, pFpuCtx);
7371}
7372
7373
7374/**
7375 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7376 * unless a pending exception prevents it.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param pResult The FPU operation result to store and push.
7380 */
7381IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7382{
7383 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7384 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7385
7386 /* Update FSW and bail if there are pending exceptions afterwards. */
7387 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7388 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7389 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7390 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7391 {
7392 pFpuCtx->FSW = fFsw;
7393 return;
7394 }
7395
7396 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7397 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7398 {
7399 /* All is fine, push the actual value. */
7400 pFpuCtx->FTW |= RT_BIT(iNewTop);
7401 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7402 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7403 }
7404 else if (pFpuCtx->FCW & X86_FCW_IM)
7405 {
7406 /* Masked stack overflow, push QNaN. */
7407 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7408 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7409 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7410 }
7411 else
7412 {
7413 /* Raise stack overflow, don't push anything. */
7414 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7415 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7416 return;
7417 }
7418
7419 fFsw &= ~X86_FSW_TOP_MASK;
7420 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7421 pFpuCtx->FSW = fFsw;
7422
7423 iemFpuRotateStackPush(pFpuCtx);
7424}
7425
7426
7427/**
7428 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7429 * FOP.
7430 *
7431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7432 * @param pResult The result to store.
7433 * @param iStReg Which FPU register to store it in.
7434 */
7435IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7436{
7437 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7438 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7439 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7440}
7441
7442
7443/**
7444 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7445 * FOP, and then pops the stack.
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param pResult The result to store.
7449 * @param iStReg Which FPU register to store it in.
7450 */
7451IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7452{
7453 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7454 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7455 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7456 iemFpuMaybePopOne(pFpuCtx);
7457}
7458
7459
7460/**
7461 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7462 * FPUDP, and FPUDS.
7463 *
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 * @param pResult The result to store.
7466 * @param iStReg Which FPU register to store it in.
7467 * @param iEffSeg The effective memory operand selector register.
7468 * @param GCPtrEff The effective memory operand offset.
7469 */
7470IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7471 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7472{
7473 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7474 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7476 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7477}
7478
7479
7480/**
7481 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7482 * FPUDP, and FPUDS, and then pops the stack.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param pResult The result to store.
7486 * @param iStReg Which FPU register to store it in.
7487 * @param iEffSeg The effective memory operand selector register.
7488 * @param GCPtrEff The effective memory operand offset.
7489 */
7490IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7491 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7492{
7493 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7494 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7495 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7496 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7497 iemFpuMaybePopOne(pFpuCtx);
7498}
7499
7500
7501/**
7502 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7503 *
7504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7505 */
7506IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7507{
7508 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7509 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7510}
7511
7512
7513/**
7514 * Marks the specified stack register as free (for FFREE).
7515 *
7516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7517 * @param iStReg The register to free.
7518 */
7519IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7520{
7521 Assert(iStReg < 8);
7522 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7523 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7524 pFpuCtx->FTW &= ~RT_BIT(iReg);
7525}
7526
7527
7528/**
7529 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7530 *
7531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7532 */
7533IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7534{
7535 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7536 uint16_t uFsw = pFpuCtx->FSW;
7537 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7538 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7539 uFsw &= ~X86_FSW_TOP_MASK;
7540 uFsw |= uTop;
7541 pFpuCtx->FSW = uFsw;
7542}
7543
7544
7545/**
7546 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7547 *
7548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7549 */
7550IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7551{
7552 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7553 uint16_t uFsw = pFpuCtx->FSW;
7554 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7555 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7556 uFsw &= ~X86_FSW_TOP_MASK;
7557 uFsw |= uTop;
7558 pFpuCtx->FSW = uFsw;
7559}
7560
7561
7562/**
7563 * Updates the FSW, FOP, FPUIP, and FPUCS.
7564 *
7565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7566 * @param u16FSW The FSW from the current instruction.
7567 */
7568IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7569{
7570 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7571 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7572 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7573}
7574
7575
7576/**
7577 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7578 *
7579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7580 * @param u16FSW The FSW from the current instruction.
7581 */
7582IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7583{
7584 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7585 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7586 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7587 iemFpuMaybePopOne(pFpuCtx);
7588}
7589
7590
7591/**
7592 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7593 *
7594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7595 * @param u16FSW The FSW from the current instruction.
7596 * @param iEffSeg The effective memory operand selector register.
7597 * @param GCPtrEff The effective memory operand offset.
7598 */
7599IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7600{
7601 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7602 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7603 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7604 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7605}
7606
7607
7608/**
7609 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7610 *
7611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7612 * @param u16FSW The FSW from the current instruction.
7613 */
7614IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7615{
7616 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7617 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7618 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7619 iemFpuMaybePopOne(pFpuCtx);
7620 iemFpuMaybePopOne(pFpuCtx);
7621}
7622
7623
7624/**
7625 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7626 *
7627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7628 * @param u16FSW The FSW from the current instruction.
7629 * @param iEffSeg The effective memory operand selector register.
7630 * @param GCPtrEff The effective memory operand offset.
7631 */
7632IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7633{
7634 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7635 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7636 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7637 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7638 iemFpuMaybePopOne(pFpuCtx);
7639}
7640
7641
7642/**
7643 * Worker routine for raising an FPU stack underflow exception.
7644 *
7645 * @param pFpuCtx The FPU context.
7646 * @param iStReg The stack register being accessed.
7647 */
7648IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7649{
7650 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7651 if (pFpuCtx->FCW & X86_FCW_IM)
7652 {
7653 /* Masked underflow. */
7654 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7655 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7656 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7657 if (iStReg != UINT8_MAX)
7658 {
7659 pFpuCtx->FTW |= RT_BIT(iReg);
7660 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7661 }
7662 }
7663 else
7664 {
7665 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7666 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7667 }
7668}
7669
7670
7671/**
7672 * Raises a FPU stack underflow exception.
7673 *
7674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7675 * @param iStReg The destination register that should be loaded
7676 * with QNaN if \#IS is not masked. Specify
7677 * UINT8_MAX if none (like for fcom).
7678 */
7679DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7680{
7681 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7682 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7683 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7684}
7685
7686
7687DECL_NO_INLINE(IEM_STATIC, void)
7688iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7689{
7690 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7691 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7692 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7693 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7694}
7695
7696
7697DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7698{
7699 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7700 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7701 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7702 iemFpuMaybePopOne(pFpuCtx);
7703}
7704
7705
7706DECL_NO_INLINE(IEM_STATIC, void)
7707iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7708{
7709 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7710 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7711 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7712 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7713 iemFpuMaybePopOne(pFpuCtx);
7714}
7715
7716
7717DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7718{
7719 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7720 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7721 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7722 iemFpuMaybePopOne(pFpuCtx);
7723 iemFpuMaybePopOne(pFpuCtx);
7724}
7725
7726
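/**
 * Raises an FPU stack underflow exception for an instruction that pushes a
 * result onto the stack.
 *
 * When \#IS is masked, a QNaN is pushed onto the stack; otherwise TOP and the
 * register stack are left unchanged and the exception is left pending.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */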
7727DECL_NO_INLINE(IEM_STATIC, void)
7728iemFpuStackPushUnderflow(PVMCPU pVCpu)
7729{
7730 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7731 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7732
7733 if (pFpuCtx->FCW & X86_FCW_IM)
7734 {
7735 /* Masked underflow - Push QNaN. */
7736 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7737 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7738 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7739 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7740 pFpuCtx->FTW |= RT_BIT(iNewTop);
7741 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7742 iemFpuRotateStackPush(pFpuCtx);
7743 }
7744 else
7745 {
7746 /* Exception pending - don't change TOP or the register stack. */
7747 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7748 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7749 }
7750}
7751
7752
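/**
 * Raises an FPU stack underflow exception for an instruction that pushes a
 * result onto the stack while also producing a value in ST(0).
 *
 * When \#IS is masked, QNaN is stored in the two registers that end up as
 * ST(0) and ST(1) after the push; otherwise TOP and the register stack are
 * left unchanged and the exception is left pending.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */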
7753DECL_NO_INLINE(IEM_STATIC, void)
7754iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7755{
7756 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7757 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7758
7759 if (pFpuCtx->FCW & X86_FCW_IM)
7760 {
7761 /* Masked underflow - Push QNaN. */
7762 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7763 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7764 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7765 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7766 pFpuCtx->FTW |= RT_BIT(iNewTop);
7767 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7768 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7769 iemFpuRotateStackPush(pFpuCtx);
7770 }
7771 else
7772 {
7773 /* Exception pending - don't change TOP or the register stack. */
7774 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7775 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7776 }
7777}
7778
7779
7780/**
7781 * Worker routine for raising an FPU stack overflow exception on a push.
7782 *
7783 * @param pFpuCtx The FPU context.
7784 */
7785IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7786{
7787 if (pFpuCtx->FCW & X86_FCW_IM)
7788 {
7789 /* Masked overflow. */
7790 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7791 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7792 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7793 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7794 pFpuCtx->FTW |= RT_BIT(iNewTop);
7795 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7796 iemFpuRotateStackPush(pFpuCtx);
7797 }
7798 else
7799 {
7800 /* Exception pending - don't change TOP or the register stack. */
7801 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7802 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7803 }
7804}
7805
7806
7807/**
7808 * Raises an FPU stack overflow exception on a push.
7809 *
7810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7811 */
7812DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7813{
7814 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7815 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7816 iemFpuStackPushOverflowOnly(pFpuCtx);
7817}
7818
7819
7820/**
7821 * Raises an FPU stack overflow exception on a push with a memory operand.
7822 *
7823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7824 * @param iEffSeg The effective memory operand selector register.
7825 * @param GCPtrEff The effective memory operand offset.
7826 */
7827DECL_NO_INLINE(IEM_STATIC, void)
7828iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7829{
7830 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7831 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7832 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7833 iemFpuStackPushOverflowOnly(pFpuCtx);
7834}
7835
7836
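/**
 * Checks that the given FPU stack register is not empty.
 *
 * @returns VINF_SUCCESS if the register is not empty, VERR_NOT_FOUND if it is.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 */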
7837IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7838{
7839 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7840 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7841 if (pFpuCtx->FTW & RT_BIT(iReg))
7842 return VINF_SUCCESS;
7843 return VERR_NOT_FOUND;
7844}
7845
7846
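/**
 * Checks that the given FPU stack register is not empty, returning a const
 * reference to its 80-bit value if it is not.
 *
 * @returns VINF_SUCCESS if the register is not empty, VERR_NOT_FOUND if it is.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 * @param ppRef Where to return the pointer to the register value.
 */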
7847IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7848{
7849 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7850 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7851 if (pFpuCtx->FTW & RT_BIT(iReg))
7852 {
7853 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7854 return VINF_SUCCESS;
7855 }
7856 return VERR_NOT_FOUND;
7857}
7858
7859
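/**
 * Checks that two FPU stack registers are not empty, returning const
 * references to both values if so.
 *
 * @returns VINF_SUCCESS if neither register is empty, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the pointer to the first register value.
 * @param iStReg1 The second stack register (relative to TOP).
 * @param ppRef1 Where to return the pointer to the second register value.
 */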
7860IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7861 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7862{
7863 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7864 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7865 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7866 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7867 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7868 {
7869 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7870 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7871 return VINF_SUCCESS;
7872 }
7873 return VERR_NOT_FOUND;
7874}
7875
7876
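/**
 * Checks that two FPU stack registers are not empty, returning a const
 * reference to the first value only.
 *
 * @returns VINF_SUCCESS if neither register is empty, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP).
 * @param ppRef0 Where to return the pointer to the first register value.
 * @param iStReg1 The second stack register (relative to TOP).
 */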
7877IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7878{
7879 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7880 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7881 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7882 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7883 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7884 {
7885 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7886 return VINF_SUCCESS;
7887 }
7888 return VERR_NOT_FOUND;
7889}
7890
7891
7892/**
7893 * Updates the FPU exception status after FCW is changed.
7894 *
7895 * @param pFpuCtx The FPU context.
7896 */
7897IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7898{
7899 uint16_t u16Fsw = pFpuCtx->FSW;
7900 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7901 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7902 else
7903 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7904 pFpuCtx->FSW = u16Fsw;
7905}
7906
7907
7908/**
7909 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7910 *
7911 * @returns The full FTW.
7912 * @param pFpuCtx The FPU context.
7913 */
7914IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7915{
7916 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7917 uint16_t u16Ftw = 0;
7918 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7919 for (unsigned iSt = 0; iSt < 8; iSt++)
7920 {
7921 unsigned const iReg = (iSt + iTop) & 7;
7922 if (!(u8Ftw & RT_BIT(iReg)))
7923 u16Ftw |= 3 << (iReg * 2); /* empty */
7924 else
7925 {
7926 uint16_t uTag;
7927 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7928 if (pr80Reg->s.uExponent == 0x7fff)
7929 uTag = 2; /* Exponent is all 1's => Special. */
7930 else if (pr80Reg->s.uExponent == 0x0000)
7931 {
7932 if (pr80Reg->s.u64Mantissa == 0x0000)
7933 uTag = 1; /* All bits are zero => Zero. */
7934 else
7935 uTag = 2; /* Must be special. */
7936 }
7937 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7938 uTag = 0; /* Valid. */
7939 else
7940 uTag = 2; /* Must be special. */
7941
7942 u16Ftw |= uTag << (iReg * 2);
7943 }
7944 }
7945
7946 return u16Ftw;
7947}
7948
7949
7950/**
7951 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7952 *
7953 * @returns The compressed FTW.
7954 * @param u16FullFtw The full FTW to convert.
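 *
 * @remarks Each 2-bit tag in the full FTW (00=valid, 01=zero, 10=special,
 * 11=empty) is reduced to a single bit in the compressed form: 1 if
 * the register is not empty, 0 if it is empty.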
7955 */
7956IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7957{
7958 uint8_t u8Ftw = 0;
7959 for (unsigned i = 0; i < 8; i++)
7960 {
7961 if ((u16FullFtw & 3) != 3 /*empty*/)
7962 u8Ftw |= RT_BIT(i);
7963 u16FullFtw >>= 2;
7964 }
7965
7966 return u8Ftw;
7967}
7968
7969/** @} */
7970
7971
7972/** @name Memory access.
7973 *
7974 * @{
7975 */
7976
7977
7978/**
7979 * Updates the IEMCPU::cbWritten counter if applicable.
7980 *
7981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7982 * @param fAccess The access being accounted for.
7983 * @param cbMem The access size.
7984 */
7985DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7986{
7987 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7988 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7989 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7990}
7991
7992
7993/**
7994 * Checks if the given segment can be written to, raising the appropriate
7995 * exception if not.
7996 *
7997 * @returns VBox strict status code.
7998 *
7999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8000 * @param pHid Pointer to the hidden register.
8001 * @param iSegReg The register number.
8002 * @param pu64BaseAddr Where to return the base address to use for the
8003 * segment. (In 64-bit code it may differ from the
8004 * base in the hidden segment.)
8005 */
8006IEM_STATIC VBOXSTRICTRC
8007iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8008{
8009 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8010
8011 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8012 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8013 else
8014 {
8015 if (!pHid->Attr.n.u1Present)
8016 {
8017 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8018 AssertRelease(uSel == 0);
8019 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8020 return iemRaiseGeneralProtectionFault0(pVCpu);
8021 }
8022
8023 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8024 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8025 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8026 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8027 *pu64BaseAddr = pHid->u64Base;
8028 }
8029 return VINF_SUCCESS;
8030}
8031
8032
8033/**
8034 * Checks if the given segment can be read from, raising the appropriate
8035 * exception if not.
8036 *
8037 * @returns VBox strict status code.
8038 *
8039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8040 * @param pHid Pointer to the hidden register.
8041 * @param iSegReg The register number.
8042 * @param pu64BaseAddr Where to return the base address to use for the
8043 * segment. (In 64-bit code it may differ from the
8044 * base in the hidden segment.)
8045 */
8046IEM_STATIC VBOXSTRICTRC
8047iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8048{
8049 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8050
8051 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8052 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8053 else
8054 {
8055 if (!pHid->Attr.n.u1Present)
8056 {
8057 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8058 AssertRelease(uSel == 0);
8059 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8060 return iemRaiseGeneralProtectionFault0(pVCpu);
8061 }
8062
8063 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8064 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8065 *pu64BaseAddr = pHid->u64Base;
8066 }
8067 return VINF_SUCCESS;
8068}
8069
8070
8071/**
8072 * Applies the segment limit, base and attributes.
8073 *
8074 * This may raise a \#GP or \#SS.
8075 *
8076 * @returns VBox strict status code.
8077 *
8078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8079 * @param fAccess The kind of access which is being performed.
8080 * @param iSegReg The index of the segment register to apply.
8081 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8082 * TSS, ++).
8083 * @param cbMem The access size.
8084 * @param pGCPtrMem Pointer to the guest memory address to apply
8085 * segmentation to. Input and output parameter.
8086 */
8087IEM_STATIC VBOXSTRICTRC
8088iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8089{
8090 if (iSegReg == UINT8_MAX)
8091 return VINF_SUCCESS;
8092
8093 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8094 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8095 switch (pVCpu->iem.s.enmCpuMode)
8096 {
8097 case IEMMODE_16BIT:
8098 case IEMMODE_32BIT:
8099 {
8100 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8101 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8102
8103 if ( pSel->Attr.n.u1Present
8104 && !pSel->Attr.n.u1Unusable)
8105 {
8106 Assert(pSel->Attr.n.u1DescType);
8107 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8108 {
8109 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8110 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8111 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8112
8113 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8114 {
8115 /** @todo CPL check. */
8116 }
8117
8118 /*
8119 * There are two kinds of data selectors, normal and expand down.
8120 */
8121 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8122 {
8123 if ( GCPtrFirst32 > pSel->u32Limit
8124 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8125 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8126 }
8127 else
8128 {
8129 /*
8130 * The upper boundary is defined by the B bit, not the G bit!
8131 */
8132 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8133 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8134 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8135 }
8136 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8137 }
8138 else
8139 {
8140
8141 /*
8142 * Code selectors can usually be used to read through; writing is
8143 * only permitted in real and V8086 mode.
8144 */
8145 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8146 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8147 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8148 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8149 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8150
8151 if ( GCPtrFirst32 > pSel->u32Limit
8152 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8153 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8154
8155 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8156 {
8157 /** @todo CPL check. */
8158 }
8159
8160 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8161 }
8162 }
8163 else
8164 return iemRaiseGeneralProtectionFault0(pVCpu);
8165 return VINF_SUCCESS;
8166 }
8167
8168 case IEMMODE_64BIT:
8169 {
8170 RTGCPTR GCPtrMem = *pGCPtrMem;
8171 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8172 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8173
8174 Assert(cbMem >= 1);
8175 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8176 return VINF_SUCCESS;
8177 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8178 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8179 return iemRaiseGeneralProtectionFault0(pVCpu);
8180 }
8181
8182 default:
8183 AssertFailedReturn(VERR_IEM_IPE_7);
8184 }
8185}
8186
8187
8188/**
8189 * Translates a virtual address to a physical address and checks if we
8190 * can access the page as specified.
8191 *
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param GCPtrMem The virtual address.
8194 * @param fAccess The intended access.
8195 * @param pGCPhysMem Where to return the physical address.
8196 */
8197IEM_STATIC VBOXSTRICTRC
8198iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8199{
8200 /** @todo Need a different PGM interface here. We're currently using
8201 * generic / REM interfaces. This won't cut it for R0 & RC. */
8202 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8203 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8204 RTGCPHYS GCPhys;
8205 uint64_t fFlags;
8206 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8207 if (RT_FAILURE(rc))
8208 {
8209 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8210 /** @todo Check unassigned memory in unpaged mode. */
8211 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8212 *pGCPhysMem = NIL_RTGCPHYS;
8213 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8214 }
8215
8216 /* If the page is writable and does not have the no-exec bit set, all
8217 access is allowed. Otherwise we'll have to check more carefully... */
8218 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8219 {
8220 /* Write to read only memory? */
8221 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8222 && !(fFlags & X86_PTE_RW)
8223 && ( (pVCpu->iem.s.uCpl == 3
8224 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8225 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8226 {
8227 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8228 *pGCPhysMem = NIL_RTGCPHYS;
8229 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8230 }
8231
8232 /* Kernel memory accessed by userland? */
8233 if ( !(fFlags & X86_PTE_US)
8234 && pVCpu->iem.s.uCpl == 3
8235 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8236 {
8237 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8238 *pGCPhysMem = NIL_RTGCPHYS;
8239 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8240 }
8241
8242 /* Executing non-executable memory? */
8243 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8244 && (fFlags & X86_PTE_PAE_NX)
8245 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8246 {
8247 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8248 *pGCPhysMem = NIL_RTGCPHYS;
8249 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8250 VERR_ACCESS_DENIED);
8251 }
8252 }
8253
8254 /*
8255 * Set the dirty / access flags.
8256 * ASSUMES this is set when the address is translated rather than on commit...
8257 */
8258 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8259 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8260 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8261 {
8262 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8263 AssertRC(rc2);
8264 }
8265
8266 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8267 *pGCPhysMem = GCPhys;
8268 return VINF_SUCCESS;
8269}
8270
8271
8272
8273/**
8274 * Maps a physical page.
8275 *
8276 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8278 * @param GCPhysMem The physical address.
8279 * @param fAccess The intended access.
8280 * @param ppvMem Where to return the mapping address.
8281 * @param pLock The PGM lock.
8282 */
8283IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8284{
8285#ifdef IEM_LOG_MEMORY_WRITES
8286 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8287 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8288#endif
8289
8290 /** @todo This API may require some improving later. A private deal with PGM
8291 * regarding locking and unlocking needs to be struck. A couple of TLBs
8292 * living in PGM, but with publicly accessible inlined access methods
8293 * could perhaps be an even better solution. */
8294 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8295 GCPhysMem,
8296 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8297 pVCpu->iem.s.fBypassHandlers,
8298 ppvMem,
8299 pLock);
8300 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8301 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8302
8303 return rc;
8304}
8305
8306
8307/**
8308 * Unmap a page previously mapped by iemMemPageMap.
8309 *
8310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8311 * @param GCPhysMem The physical address.
8312 * @param fAccess The intended access.
8313 * @param pvMem What iemMemPageMap returned.
8314 * @param pLock The PGM lock.
8315 */
8316DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8317{
8318 NOREF(pVCpu);
8319 NOREF(GCPhysMem);
8320 NOREF(fAccess);
8321 NOREF(pvMem);
8322 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8323}
8324
8325
8326/**
8327 * Looks up a memory mapping entry.
8328 *
8329 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8331 * @param pvMem The memory address.
8332 * @param fAccess The access flags the mapping was made with.
8333 */
8334DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8335{
8336 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8337 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8338 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8339 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8340 return 0;
8341 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8342 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8343 return 1;
8344 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8345 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8346 return 2;
8347 return VERR_NOT_FOUND;
8348}
8349
8350
8351/**
8352 * Finds a free memmap entry when using iNextMapping doesn't work.
8353 *
8354 * @returns Memory mapping index, 1024 on failure.
8355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8356 */
8357IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8358{
8359 /*
8360 * The easy case.
8361 */
8362 if (pVCpu->iem.s.cActiveMappings == 0)
8363 {
8364 pVCpu->iem.s.iNextMapping = 1;
8365 return 0;
8366 }
8367
8368 /* There should be enough mappings for all instructions. */
8369 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8370
8371 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8372 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8373 return i;
8374
8375 AssertFailedReturn(1024);
8376}
8377
8378
8379/**
8380 * Commits a bounce buffer that needs writing back and unmaps it.
8381 *
8382 * @returns Strict VBox status code.
8383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8384 * @param iMemMap The index of the buffer to commit.
8385 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8386 * Always false in ring-3, obviously.
8387 */
8388IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8389{
8390 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8391 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8392#ifdef IN_RING3
8393 Assert(!fPostponeFail);
8394 RT_NOREF_PV(fPostponeFail);
8395#endif
8396
8397 /*
8398 * Do the writing.
8399 */
8400 PVM pVM = pVCpu->CTX_SUFF(pVM);
8401 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8402 {
8403 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8404 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8405 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8406 if (!pVCpu->iem.s.fBypassHandlers)
8407 {
8408 /*
8409 * Carefully and efficiently dealing with access handler return
8410 * codes make this a little bloated.
8411 */
8412 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8413 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8414 pbBuf,
8415 cbFirst,
8416 PGMACCESSORIGIN_IEM);
8417 if (rcStrict == VINF_SUCCESS)
8418 {
8419 if (cbSecond)
8420 {
8421 rcStrict = PGMPhysWrite(pVM,
8422 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8423 pbBuf + cbFirst,
8424 cbSecond,
8425 PGMACCESSORIGIN_IEM);
8426 if (rcStrict == VINF_SUCCESS)
8427 { /* nothing */ }
8428 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8429 {
8430 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8433 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8434 }
8435#ifndef IN_RING3
8436 else if (fPostponeFail)
8437 {
8438 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8441 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8442 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8443 return iemSetPassUpStatus(pVCpu, rcStrict);
8444 }
8445#endif
8446 else
8447 {
8448 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8451 return rcStrict;
8452 }
8453 }
8454 }
8455 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8456 {
8457 if (!cbSecond)
8458 {
8459 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8461 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8462 }
8463 else
8464 {
8465 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8467 pbBuf + cbFirst,
8468 cbSecond,
8469 PGMACCESSORIGIN_IEM);
8470 if (rcStrict2 == VINF_SUCCESS)
8471 {
8472 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8476 }
8477 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8478 {
8479 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8482 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8483 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8484 }
8485#ifndef IN_RING3
8486 else if (fPostponeFail)
8487 {
8488 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8492 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8493 return iemSetPassUpStatus(pVCpu, rcStrict);
8494 }
8495#endif
8496 else
8497 {
8498 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8499 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8501 return rcStrict2;
8502 }
8503 }
8504 }
8505#ifndef IN_RING3
8506 else if (fPostponeFail)
8507 {
8508 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8509 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8510 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8511 if (!cbSecond)
8512 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8513 else
8514 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8515 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8516 return iemSetPassUpStatus(pVCpu, rcStrict);
8517 }
8518#endif
8519 else
8520 {
8521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8524 return rcStrict;
8525 }
8526 }
8527 else
8528 {
8529 /*
8530 * No access handlers, much simpler.
8531 */
8532 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8533 if (RT_SUCCESS(rc))
8534 {
8535 if (cbSecond)
8536 {
8537 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8538 if (RT_SUCCESS(rc))
8539 { /* likely */ }
8540 else
8541 {
8542 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8543 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8545 return rc;
8546 }
8547 }
8548 }
8549 else
8550 {
8551 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8552 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8553 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8554 return rc;
8555 }
8556 }
8557 }
8558
8559#if defined(IEM_LOG_MEMORY_WRITES)
8560 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8561 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8562 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8563 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8564 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8565 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8566
8567 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8568 g_cbIemWrote = cbWrote;
8569 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8570#endif
8571
8572 /*
8573 * Free the mapping entry.
8574 */
8575 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8576 Assert(pVCpu->iem.s.cActiveMappings != 0);
8577 pVCpu->iem.s.cActiveMappings--;
8578 return VINF_SUCCESS;
8579}
8580
8581
8582/**
8583 * iemMemMap worker that deals with a request crossing pages.
8584 */
8585IEM_STATIC VBOXSTRICTRC
8586iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8587{
8588 /*
8589 * Do the address translations.
8590 */
8591 RTGCPHYS GCPhysFirst;
8592 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8593 if (rcStrict != VINF_SUCCESS)
8594 return rcStrict;
8595
8596 RTGCPHYS GCPhysSecond;
8597 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8598 fAccess, &GCPhysSecond);
8599 if (rcStrict != VINF_SUCCESS)
8600 return rcStrict;
8601 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8602
8603 PVM pVM = pVCpu->CTX_SUFF(pVM);
8604
8605 /*
8606 * Read in the current memory content if it's a read, execute or partial
8607 * write access.
8608 */
8609 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8610 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8611 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8612
8613 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8614 {
8615 if (!pVCpu->iem.s.fBypassHandlers)
8616 {
8617 /*
8618 * Must carefully deal with access handler status codes here,
8619 * makes the code a bit bloated.
8620 */
8621 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8622 if (rcStrict == VINF_SUCCESS)
8623 {
8624 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8625 if (rcStrict == VINF_SUCCESS)
8626 { /*likely */ }
8627 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8628 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8629 else
8630 {
8631 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8632 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8633 return rcStrict;
8634 }
8635 }
8636 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8637 {
8638 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8639 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8640 {
8641 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8642 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8643 }
8644 else
8645 {
8646 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8647 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8648 return rcStrict2;
8649 }
8650 }
8651 else
8652 {
8653 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8654 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8655 return rcStrict;
8656 }
8657 }
8658 else
8659 {
8660 /*
8661 * No informational status codes here, much more straightforward.
8662 */
8663 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8664 if (RT_SUCCESS(rc))
8665 {
8666 Assert(rc == VINF_SUCCESS);
8667 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8668 if (RT_SUCCESS(rc))
8669 Assert(rc == VINF_SUCCESS);
8670 else
8671 {
8672 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8673 return rc;
8674 }
8675 }
8676 else
8677 {
8678 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8679 return rc;
8680 }
8681 }
8682 }
8683#ifdef VBOX_STRICT
8684 else
8685 memset(pbBuf, 0xcc, cbMem);
8686 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8687 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8688#endif
8689
8690 /*
8691 * Commit the bounce buffer entry.
8692 */
8693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8696 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8698 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8699 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8700 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8701 pVCpu->iem.s.cActiveMappings++;
8702
8703 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8704 *ppvMem = pbBuf;
8705 return VINF_SUCCESS;
8706}
8707
8708
8709/**
8710 * iemMemMap worker that deals with iemMemPageMap failures.
8711 */
8712IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8713 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8714{
8715 /*
8716 * Filter out conditions we can handle and the ones which shouldn't happen.
8717 */
8718 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8719 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8720 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8721 {
8722 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8723 return rcMap;
8724 }
8725 pVCpu->iem.s.cPotentialExits++;
8726
8727 /*
8728 * Read in the current memory content if it's a read, execute or partial
8729 * write access.
8730 */
8731 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8732 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8733 {
8734 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8735 memset(pbBuf, 0xff, cbMem);
8736 else
8737 {
8738 int rc;
8739 if (!pVCpu->iem.s.fBypassHandlers)
8740 {
8741 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8742 if (rcStrict == VINF_SUCCESS)
8743 { /* nothing */ }
8744 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8745 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8746 else
8747 {
8748 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8749 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8750 return rcStrict;
8751 }
8752 }
8753 else
8754 {
8755 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8756 if (RT_SUCCESS(rc))
8757 { /* likely */ }
8758 else
8759 {
8760 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8761 GCPhysFirst, rc));
8762 return rc;
8763 }
8764 }
8765 }
8766 }
8767#ifdef VBOX_STRICT
8768 else
8769 memset(pbBuf, 0xcc, cbMem);
8770#endif
8771#ifdef VBOX_STRICT
8772 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8773 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8774#endif
8775
8776 /*
8777 * Commit the bounce buffer entry.
8778 */
8779 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8780 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8781 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8782 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8783 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8784 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8785 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8786 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8787 pVCpu->iem.s.cActiveMappings++;
8788
8789 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8790 *ppvMem = pbBuf;
8791 return VINF_SUCCESS;
8792}
8793
8794
8795
8796/**
8797 * Maps the specified guest memory for the given kind of access.
8798 *
8799 * This may be using bounce buffering of the memory if it's crossing a page
8800 * boundary or if there is an access handler installed for any of it. Because
8801 * of lock prefix guarantees, we're in for some extra clutter when this
8802 * happens.
8803 *
8804 * This may raise a \#GP, \#SS, \#PF or \#AC.
8805 *
8806 * @returns VBox strict status code.
8807 *
8808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8809 * @param ppvMem Where to return the pointer to the mapped
8810 * memory.
8811 * @param cbMem The number of bytes to map. This is usually 1,
8812 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8813 * string operations it can be up to a page.
8814 * @param iSegReg The index of the segment register to use for
8815 * this access. The base and limits are checked.
8816 * Use UINT8_MAX to indicate that no segmentation
8817 * is required (for IDT, GDT and LDT accesses).
8818 * @param GCPtrMem The address of the guest memory.
8819 * @param fAccess How the memory is being accessed. The
8820 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8821 * how to map the memory, while the
8822 * IEM_ACCESS_WHAT_XXX bit is used when raising
8823 * exceptions.
8824 */
8825IEM_STATIC VBOXSTRICTRC
8826iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8827{
8828 /*
8829 * Check the input and figure out which mapping entry to use.
8830 */
8831 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8832 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8833 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8834
8835 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8836 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8837 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8838 {
8839 iMemMap = iemMemMapFindFree(pVCpu);
8840 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8841 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8842 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8843 pVCpu->iem.s.aMemMappings[2].fAccess),
8844 VERR_IEM_IPE_9);
8845 }
8846
8847 /*
8848 * Map the memory, checking that we can actually access it. If something
8849 * slightly complicated happens, fall back on bounce buffering.
8850 */
8851 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8852 if (rcStrict != VINF_SUCCESS)
8853 return rcStrict;
8854
8855 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8856 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8857
8858 RTGCPHYS GCPhysFirst;
8859 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8860 if (rcStrict != VINF_SUCCESS)
8861 return rcStrict;
8862
8863 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8864 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8865 if (fAccess & IEM_ACCESS_TYPE_READ)
8866 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8867
8868 void *pvMem;
8869 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8870 if (rcStrict != VINF_SUCCESS)
8871 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8872
8873 /*
8874 * Fill in the mapping table entry.
8875 */
8876 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8877 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8878 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8879 pVCpu->iem.s.cActiveMappings++;
8880
8881 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8882 *ppvMem = pvMem;
8883
8884 return VINF_SUCCESS;
8885}
8886
8887
8888/**
8889 * Commits the guest memory if bounce buffered and unmaps it.
8890 *
8891 * @returns Strict VBox status code.
8892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8893 * @param pvMem The mapping.
8894 * @param fAccess The kind of access.
8895 */
8896IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8897{
8898 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8899 AssertReturn(iMemMap >= 0, iMemMap);
8900
8901 /* If it's bounce buffered, we may need to write back the buffer. */
8902 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8903 {
8904 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8905 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8906 }
8907 /* Otherwise unlock it. */
8908 else
8909 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8910
8911 /* Free the entry. */
8912 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8913 Assert(pVCpu->iem.s.cActiveMappings != 0);
8914 pVCpu->iem.s.cActiveMappings--;
8915 return VINF_SUCCESS;
8916}
8917
8918#ifdef IEM_WITH_SETJMP
8919
8920/**
8921 * Maps the specified guest memory for the given kind of access, longjmp on
8922 * error.
8923 *
8924 * This may be using bounce buffering of the memory if it's crossing a page
8925 * boundary or if there is an access handler installed for any of it. Because
8926 * of lock prefix guarantees, we're in for some extra clutter when this
8927 * happens.
8928 *
8929 * This may raise a \#GP, \#SS, \#PF or \#AC.
8930 *
8931 * @returns Pointer to the mapped memory.
8932 *
8933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8934 * @param cbMem The number of bytes to map. This is usually 1,
8935 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8936 * string operations it can be up to a page.
8937 * @param iSegReg The index of the segment register to use for
8938 * this access. The base and limits are checked.
8939 * Use UINT8_MAX to indicate that no segmentation
8940 * is required (for IDT, GDT and LDT accesses).
8941 * @param GCPtrMem The address of the guest memory.
8942 * @param fAccess How the memory is being accessed. The
8943 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8944 * how to map the memory, while the
8945 * IEM_ACCESS_WHAT_XXX bit is used when raising
8946 * exceptions.
8947 */
8948IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8949{
8950 /*
8951 * Check the input and figure out which mapping entry to use.
8952 */
8953 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8954 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8955 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8956
8957 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8958 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8959 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8960 {
8961 iMemMap = iemMemMapFindFree(pVCpu);
8962 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8963 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8964 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8965 pVCpu->iem.s.aMemMappings[2].fAccess),
8966 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8967 }
8968
8969 /*
8970 * Map the memory, checking that we can actually access it. If something
8971 * slightly complicated happens, fall back on bounce buffering.
8972 */
8973 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8974 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8975 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8976
8977 /* Crossing a page boundary? */
8978 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8979 { /* No (likely). */ }
8980 else
8981 {
8982 void *pvMem;
8983 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8984 if (rcStrict == VINF_SUCCESS)
8985 return pvMem;
8986 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8987 }
8988
8989 RTGCPHYS GCPhysFirst;
8990 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8991 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8992 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8993
8994 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8995 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8996 if (fAccess & IEM_ACCESS_TYPE_READ)
8997 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8998
8999 void *pvMem;
9000 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9001 if (rcStrict == VINF_SUCCESS)
9002 { /* likely */ }
9003 else
9004 {
9005 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9006 if (rcStrict == VINF_SUCCESS)
9007 return pvMem;
9008 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9009 }
9010
9011 /*
9012 * Fill in the mapping table entry.
9013 */
9014 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9015 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9016 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9017 pVCpu->iem.s.cActiveMappings++;
9018
9019 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9020 return pvMem;
9021}
9022
9023
9024/**
9025 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9026 *
9027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9028 * @param pvMem The mapping.
9029 * @param fAccess The kind of access.
9030 */
9031IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9032{
9033 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9034 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9035
9036 /* If it's bounce buffered, we may need to write back the buffer. */
9037 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9038 {
9039 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9040 {
9041 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9042 if (rcStrict == VINF_SUCCESS)
9043 return;
9044 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9045 }
9046 }
9047 /* Otherwise unlock it. */
9048 else
9049 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9050
9051 /* Free the entry. */
9052 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9053 Assert(pVCpu->iem.s.cActiveMappings != 0);
9054 pVCpu->iem.s.cActiveMappings--;
9055}
9056
9057#endif /* IEM_WITH_SETJMP */
9058
9059#ifndef IN_RING3
9060/**
9061 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9062 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and related state).
9063 *
9064 * Allows the instruction to be completed and retired, while the IEM user will
9065 * return to ring-3 immediately afterwards and do the postponed writes there.
9066 *
9067 * @returns VBox status code (no strict statuses). Caller must check
9068 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9070 * @param pvMem The mapping.
9071 * @param fAccess The kind of access.
9072 */
9073IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9074{
9075 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9076 AssertReturn(iMemMap >= 0, iMemMap);
9077
9078 /* If it's bounce buffered, we may need to write back the buffer. */
9079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9080 {
9081 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9082 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9083 }
9084 /* Otherwise unlock it. */
9085 else
9086 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9087
9088 /* Free the entry. */
9089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9090 Assert(pVCpu->iem.s.cActiveMappings != 0);
9091 pVCpu->iem.s.cActiveMappings--;
9092 return VINF_SUCCESS;
9093}
9094#endif
9095
9096
9097/**
9098 * Rolls back mappings, releasing page locks and such.
9099 *
9100 * The caller shall only call this after checking cActiveMappings.
9101 *
9103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9104 */
9105IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9106{
9107 Assert(pVCpu->iem.s.cActiveMappings > 0);
9108
9109 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9110 while (iMemMap-- > 0)
9111 {
9112 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9113 if (fAccess != IEM_ACCESS_INVALID)
9114 {
9115 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9116 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9117 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9118 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9119 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9120 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9121 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9122 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9123 pVCpu->iem.s.cActiveMappings--;
9124 }
9125 }
9126}
9127
9128
9129/**
9130 * Fetches a data byte.
9131 *
9132 * @returns Strict VBox status code.
9133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9134 * @param pu8Dst Where to return the byte.
9135 * @param iSegReg The index of the segment register to use for
9136 * this access. The base and limits are checked.
9137 * @param GCPtrMem The address of the guest memory.
9138 */
9139IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9140{
9141 /* The lazy approach for now... */
9142 uint8_t const *pu8Src;
9143 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9144 if (rc == VINF_SUCCESS)
9145 {
9146 *pu8Dst = *pu8Src;
9147 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9148 }
9149 return rc;
9150}
9151
9152
9153#ifdef IEM_WITH_SETJMP
9154/**
9155 * Fetches a data byte, longjmp on error.
9156 *
9157 * @returns The byte.
9158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9159 * @param iSegReg The index of the segment register to use for
9160 * this access. The base and limits are checked.
9161 * @param GCPtrMem The address of the guest memory.
9162 */
9163DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9164{
9165 /* The lazy approach for now... */
9166 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9167 uint8_t const bRet = *pu8Src;
9168 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9169 return bRet;
9170}
9171#endif /* IEM_WITH_SETJMP */
9172
9173
9174/**
9175 * Fetches a data word.
9176 *
9177 * @returns Strict VBox status code.
9178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9179 * @param pu16Dst Where to return the word.
9180 * @param iSegReg The index of the segment register to use for
9181 * this access. The base and limits are checked.
9182 * @param GCPtrMem The address of the guest memory.
9183 */
9184IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9185{
9186 /* The lazy approach for now... */
9187 uint16_t const *pu16Src;
9188 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9189 if (rc == VINF_SUCCESS)
9190 {
9191 *pu16Dst = *pu16Src;
9192 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9193 }
9194 return rc;
9195}
9196
9197
9198#ifdef IEM_WITH_SETJMP
9199/**
9200 * Fetches a data word, longjmp on error.
9201 *
9202 * @returns The word.
9203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9204 * @param iSegReg The index of the segment register to use for
9205 * this access. The base and limits are checked.
9206 * @param GCPtrMem The address of the guest memory.
9207 */
9208DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9209{
9210 /* The lazy approach for now... */
9211 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9212 uint16_t const u16Ret = *pu16Src;
9213 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9214 return u16Ret;
9215}
9216#endif
9217
9218
9219/**
9220 * Fetches a data dword.
9221 *
9222 * @returns Strict VBox status code.
9223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9224 * @param pu32Dst Where to return the dword.
9225 * @param iSegReg The index of the segment register to use for
9226 * this access. The base and limits are checked.
9227 * @param GCPtrMem The address of the guest memory.
9228 */
9229IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9230{
9231 /* The lazy approach for now... */
9232 uint32_t const *pu32Src;
9233 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9234 if (rc == VINF_SUCCESS)
9235 {
9236 *pu32Dst = *pu32Src;
9237 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9238 }
9239 return rc;
9240}
9241
9242
9243#ifdef IEM_WITH_SETJMP
9244
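/**
 * Applies segmentation to a data read address, longjmp on error.
 *
 * Checks the segment attributes and limit (or the canonical address in 64-bit
 * mode) and adds the segment base as appropriate, raising the relevant
 * exception via longjmp on failure.
 *
 * @returns The address to use for the access (segment base applied).
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for this access.
 * @param cbMem The number of bytes to access.
 * @param GCPtrMem The offset within the segment of the guest memory to access.
 */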
9245IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9246{
9247 Assert(cbMem >= 1);
9248 Assert(iSegReg < X86_SREG_COUNT);
9249
9250 /*
9251 * 64-bit mode is simpler.
9252 */
9253 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9254 {
9255 if (iSegReg >= X86_SREG_FS)
9256 {
9257 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9258 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9259 GCPtrMem += pSel->u64Base;
9260 }
9261
9262 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9263 return GCPtrMem;
9264 }
9265 /*
9266 * 16-bit and 32-bit segmentation.
9267 */
9268 else
9269 {
9270 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9271 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9272 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9273 == X86DESCATTR_P /* data, expand up */
9274 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9275 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9276 {
9277 /* expand up */
9278 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9279 if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9280               && GCPtrLast32 >= (uint32_t)GCPtrMem))
9281    return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9282 }
9283 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9284 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9285 {
9286 /* expand down */
9287 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9288 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9289 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9290 && GCPtrLast32 > (uint32_t)GCPtrMem))
9291 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9292 }
9293 else
9294 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9295 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9296 }
9297 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9298}
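/*
 * Editorial note with a small worked example (hypothetical numbers, not
 * from the original source): for an expand-up data segment the valid
 * offsets are 0 through u32Limit, so with u32Limit=0xfff a 4-byte access
 * at offset 0xffc is fine while one at 0xffd overruns the limit.  For an
 * expand-down segment the valid offsets are u32Limit+1 up to 0xffff, or
 * 0xffffffff when the D/B bit (u1DefBig) is set, which is why the check
 * above requires the offset to be strictly greater than the limit.
 */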
9299
9300
9301IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9302{
9303 Assert(cbMem >= 1);
9304 Assert(iSegReg < X86_SREG_COUNT);
9305
9306 /*
9307 * 64-bit mode is simpler.
9308 */
9309 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9310 {
9311 if (iSegReg >= X86_SREG_FS)
9312 {
9313 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9314 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9315 GCPtrMem += pSel->u64Base;
9316 }
9317
9318 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9319 return GCPtrMem;
9320 }
9321 /*
9322 * 16-bit and 32-bit segmentation.
9323 */
9324 else
9325 {
9326 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9327 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9328 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9329 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9330 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9331 {
9332 /* expand up */
9333 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9334 if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9335               && GCPtrLast32 >= (uint32_t)GCPtrMem))
9336    return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9339 {
9340 /* expand down */
9341 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9342 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9343 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9344 && GCPtrLast32 > (uint32_t)GCPtrMem))
9345 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9346 }
9347 else
9348 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9349 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9350 }
9351 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9352}
9353
9354
9355/**
9356 * Fetches a data dword, longjmp on error, fallback/safe version.
9357 *
9358 * @returns The dword.
9359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9360 * @param iSegReg The index of the segment register to use for
9361 * this access. The base and limits are checked.
9362 * @param GCPtrMem The address of the guest memory.
9363 */
9364IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9365{
9366 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9367 uint32_t const u32Ret = *pu32Src;
9368 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9369 return u32Ret;
9370}
9371
9372
9373/**
9374 * Fetches a data dword, longjmp on error.
9375 *
9376 * @returns The dword.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384# ifdef IEM_WITH_DATA_TLB
9385 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9386 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9387 {
9388 /// @todo more later.
9389 }
9390
9391 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9392# else
9393 /* The lazy approach. */
9394 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9395 uint32_t const u32Ret = *pu32Src;
9396 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9397 return u32Ret;
9398# endif
9399}
9400#endif
9401
9402
9403#ifdef SOME_UNUSED_FUNCTION
9404/**
9405 * Fetches a data dword and sign extends it to a qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param pu64Dst Where to return the sign extended value.
9410 * @param iSegReg The index of the segment register to use for
9411 * this access. The base and limits are checked.
9412 * @param GCPtrMem The address of the guest memory.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9415{
9416 /* The lazy approach for now... */
9417 int32_t const *pi32Src;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = *pi32Src;
9422 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9423 }
9424#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9425 else
9426 *pu64Dst = 0;
9427#endif
9428 return rc;
9429}
9430#endif
9431
9432
9433/**
9434 * Fetches a data qword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param pu64Dst Where to return the qword.
9439 * @param iSegReg The index of the segment register to use for
9440 * this access. The base and limits are checked.
9441 * @param GCPtrMem The address of the guest memory.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9444{
9445 /* The lazy approach for now... */
9446 uint64_t const *pu64Src;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = *pu64Src;
9451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9452 }
9453 return rc;
9454}
9455
9456
9457#ifdef IEM_WITH_SETJMP
9458/**
9459 * Fetches a data qword, longjmp on error.
9460 *
9461 * @returns The qword.
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param iSegReg The index of the segment register to use for
9464 * this access. The base and limits are checked.
9465 * @param GCPtrMem The address of the guest memory.
9466 */
9467DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9468{
9469 /* The lazy approach for now... */
9470 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9471 uint64_t const u64Ret = *pu64Src;
9472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9473 return u64Ret;
9474}
9475#endif
9476
9477
9478/**
9479 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu64Dst Where to return the qword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9492 if (RT_UNLIKELY(GCPtrMem & 15))
9493 return iemRaiseGeneralProtectionFault0(pVCpu);
9494
9495 uint64_t const *pu64Src;
9496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 if (rc == VINF_SUCCESS)
9498 {
9499 *pu64Dst = *pu64Src;
9500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9501 }
9502 return rc;
9503}
9504
9505
9506#ifdef IEM_WITH_SETJMP
9507/**
9508 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9509 *
9510 * @returns The qword.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 */
9516DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9517{
9518 /* The lazy approach for now... */
9519 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9520 if (RT_LIKELY(!(GCPtrMem & 15)))
9521 {
9522 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9523 uint64_t const u64Ret = *pu64Src;
9524 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9525 return u64Ret;
9526 }
9527
9528 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9529 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9530}
9531#endif
9532
9533
9534/**
9535 * Fetches a data tword.
9536 *
9537 * @returns Strict VBox status code.
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param pr80Dst Where to return the tword.
9540 * @param iSegReg The index of the segment register to use for
9541 * this access. The base and limits are checked.
9542 * @param GCPtrMem The address of the guest memory.
9543 */
9544IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /* The lazy approach for now... */
9547 PCRTFLOAT80U pr80Src;
9548 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9549 if (rc == VINF_SUCCESS)
9550 {
9551 *pr80Dst = *pr80Src;
9552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9553 }
9554 return rc;
9555}
9556
9557
9558#ifdef IEM_WITH_SETJMP
9559/**
9560 * Fetches a data tword, longjmp on error.
9561 *
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pr80Dst Where to return the tword.
9564 * @param iSegReg The index of the segment register to use for
9565 * this access. The base and limits are checked.
9566 * @param GCPtrMem The address of the guest memory.
9567 */
9568DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9569{
9570 /* The lazy approach for now... */
9571 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9572 *pr80Dst = *pr80Src;
9573 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9574}
9575#endif
9576
9577
9578/**
9579 * Fetches a data dqword (double qword), generally SSE related.
9580 *
9581 * @returns Strict VBox status code.
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pu128Dst Where to return the dqword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTUINT128U pu128Src;
9592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9593 if (rc == VINF_SUCCESS)
9594 {
9595 pu128Dst->au64[0] = pu128Src->au64[0];
9596 pu128Dst->au64[1] = pu128Src->au64[1];
9597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9598 }
9599 return rc;
9600}
9601
9602
9603#ifdef IEM_WITH_SETJMP
9604/**
9605 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9606 *
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pu128Dst Where to return the dqword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 pu128Dst->au64[0] = pu128Src->au64[0];
9618 pu128Dst->au64[1] = pu128Src->au64[1];
9619 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9620}
9621#endif
9622
9623
9624/**
9625 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9626 * related.
9627 *
9628 * Raises \#GP(0) if not aligned.
9629 *
9630 * @returns Strict VBox status code.
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param pu128Dst Where to return the dqword.
9633 * @param iSegReg The index of the segment register to use for
9634 * this access. The base and limits are checked.
9635 * @param GCPtrMem The address of the guest memory.
9636 */
9637IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9638{
9639 /* The lazy approach for now... */
9640 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9641 if ( (GCPtrMem & 15)
9642 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9643 return iemRaiseGeneralProtectionFault0(pVCpu);
9644
9645 PCRTUINT128U pu128Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 pu128Dst->au64[0] = pu128Src->au64[0];
9650 pu128Dst->au64[1] = pu128Src->au64[1];
9651 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9652 }
9653 return rc;
9654}
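/*
 * Editorial note: the X86_MXCSR_MM test above refers to AMD's misaligned
 * SSE mode (MXCSR.MM, available with the MisAlignSse CPUID feature).  When
 * that bit is set, 16-byte SSE accesses may be unaligned and the alignment
 * #GP(0) is not raised; the to-do about checking this before or after
 * adding the segment base applies to both this function and the setjmp
 * variant below.
 */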
9655
9656
9657#ifdef IEM_WITH_SETJMP
9658/**
9659 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9660 * related, longjmp on error.
9661 *
9662 * Raises \#GP(0) if not aligned.
9663 *
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu128Dst Where to return the dqword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9674 if ( (GCPtrMem & 15) == 0
9675 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9676 {
9677 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9678 pu128Dst->au64[0] = pu128Src->au64[0];
9679 pu128Dst->au64[1] = pu128Src->au64[1];
9680 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9681 return;
9682 }
9683
9684 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9685 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9686}
9687#endif
9688
9689
9690/**
9691 * Fetches a data oword (octo word), generally AVX related.
9692 *
9693 * @returns Strict VBox status code.
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu256Dst Where to return the oword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 PCRTUINT256U pu256Src;
9704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9705 if (rc == VINF_SUCCESS)
9706 {
9707 pu256Dst->au64[0] = pu256Src->au64[0];
9708 pu256Dst->au64[1] = pu256Src->au64[1];
9709 pu256Dst->au64[2] = pu256Src->au64[2];
9710 pu256Dst->au64[3] = pu256Src->au64[3];
9711 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9712 }
9713 return rc;
9714}
9715
9716
9717#ifdef IEM_WITH_SETJMP
9718/**
9719 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9720 *
9721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9722 * @param pu256Dst Where to return the oword.
9723 * @param iSegReg The index of the segment register to use for
9724 * this access. The base and limits are checked.
9725 * @param GCPtrMem The address of the guest memory.
9726 */
9727IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9728{
9729 /* The lazy approach for now... */
9730 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9731 pu256Dst->au64[0] = pu256Src->au64[0];
9732 pu256Dst->au64[1] = pu256Src->au64[1];
9733 pu256Dst->au64[2] = pu256Src->au64[2];
9734 pu256Dst->au64[3] = pu256Src->au64[3];
9735 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9736}
9737#endif
9738
9739
9740/**
9741 * Fetches a data oword (octo word) at an aligned address, generally AVX
9742 * related.
9743 *
9744 * Raises \#GP(0) if not aligned.
9745 *
9746 * @returns Strict VBox status code.
9747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9748 * @param pu256Dst Where to return the oword.
9749 * @param iSegReg The index of the segment register to use for
9750 * this access. The base and limits are checked.
9751 * @param GCPtrMem The address of the guest memory.
9752 */
9753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9754{
9755 /* The lazy approach for now... */
9756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9757 if (GCPtrMem & 31)
9758 return iemRaiseGeneralProtectionFault0(pVCpu);
9759
9760 PCRTUINT256U pu256Src;
9761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9762 if (rc == VINF_SUCCESS)
9763 {
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769 }
9770 return rc;
9771}
9772
9773
9774#ifdef IEM_WITH_SETJMP
9775/**
9776 * Fetches a data oword (octo word) at an aligned address, generally AVX
9777 * related, longjmp on error.
9778 *
9779 * Raises \#GP(0) if not aligned.
9780 *
9781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9782 * @param pu256Dst Where to return the oword.
9783 * @param iSegReg The index of the segment register to use for
9784 * this access. The base and limits are checked.
9785 * @param GCPtrMem The address of the guest memory.
9786 */
9787DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9788{
9789 /* The lazy approach for now... */
9790 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9791 if ((GCPtrMem & 31) == 0)
9792 {
9793 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9794 pu256Dst->au64[0] = pu256Src->au64[0];
9795 pu256Dst->au64[1] = pu256Src->au64[1];
9796 pu256Dst->au64[2] = pu256Src->au64[2];
9797 pu256Dst->au64[3] = pu256Src->au64[3];
9798 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9799 return;
9800 }
9801
9802 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9804}
9805#endif
9806
9807
9808
9809/**
9810 * Fetches a descriptor register (lgdt, lidt).
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9814 * @param pcbLimit Where to return the limit.
9815 * @param pGCPtrBase Where to return the base.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 * @param enmOpSize The effective operand size.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9822 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9823{
9824 /*
9825 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9826 * little special:
9827 * - The two reads are done separately.
9828 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9829 * - We suspect the 386 to actually commit the limit before the base in
9830 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9831 * don't try to emulate this eccentric behavior, because it's not well
9832 * enough understood and rather hard to trigger.
9833 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9834 */
9835 VBOXSTRICTRC rcStrict;
9836 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9837 {
9838 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9841 }
9842 else
9843 {
9844 uint32_t uTmp = 0; /* (Initialized to keep Visual C++ from warning about a possibly uninitialized use.) */
9845 if (enmOpSize == IEMMODE_32BIT)
9846 {
9847 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9848 {
9849 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9850 if (rcStrict == VINF_SUCCESS)
9851 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9852 }
9853 else
9854 {
9855 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9856 if (rcStrict == VINF_SUCCESS)
9857 {
9858 *pcbLimit = (uint16_t)uTmp;
9859 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9860 }
9861 }
9862 if (rcStrict == VINF_SUCCESS)
9863 *pGCPtrBase = uTmp;
9864 }
9865 else
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 {
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9871 if (rcStrict == VINF_SUCCESS)
9872 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9873 }
9874 }
9875 }
9876 return rcStrict;
9877}
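/*
 * Editorial sketch (illustration only, not part of the emulation): the
 * memory operand fetched above has the usual descriptor-table layout,
 * roughly this packed pseudo-structure:
 *
 *      struct XDTR
 *      {
 *          uint16_t cbLimit;    // bytes 0..1: the 16-bit limit
 *          uint64_t GCPtrBase;  // bytes 2..9: 64-bit base in long mode;
 *                               // only 32 bits (24 with a 16-bit operand
 *                               // size) are used otherwise
 *      };
 *
 * The two separate fetches mirror how the instruction accesses memory,
 * which is what matters for fault ordering when the operand straddles a
 * page or segment boundary.
 */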
9878
9879
9880
9881/**
9882 * Stores a data byte.
9883 *
9884 * @returns Strict VBox status code.
9885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9886 * @param iSegReg The index of the segment register to use for
9887 * this access. The base and limits are checked.
9888 * @param GCPtrMem The address of the guest memory.
9889 * @param u8Value The value to store.
9890 */
9891IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9892{
9893 /* The lazy approach for now... */
9894 uint8_t *pu8Dst;
9895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9896 if (rc == VINF_SUCCESS)
9897 {
9898 *pu8Dst = u8Value;
9899 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9900 }
9901 return rc;
9902}
9903
9904
9905#ifdef IEM_WITH_SETJMP
9906/**
9907 * Stores a data byte, longjmp on error.
9908 *
9909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9910 * @param iSegReg The index of the segment register to use for
9911 * this access. The base and limits are checked.
9912 * @param GCPtrMem The address of the guest memory.
9913 * @param u8Value The value to store.
9914 */
9915IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9916{
9917 /* The lazy approach for now... */
9918 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9919 *pu8Dst = u8Value;
9920 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9921}
9922#endif
9923
9924
9925/**
9926 * Stores a data word.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u16Value The value to store.
9934 */
9935IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9936{
9937 /* The lazy approach for now... */
9938 uint16_t *pu16Dst;
9939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 if (rc == VINF_SUCCESS)
9941 {
9942 *pu16Dst = u16Value;
9943 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9944 }
9945 return rc;
9946}
9947
9948
9949#ifdef IEM_WITH_SETJMP
9950/**
9951 * Stores a data word, longjmp on error.
9952 *
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param iSegReg The index of the segment register to use for
9955 * this access. The base and limits are checked.
9956 * @param GCPtrMem The address of the guest memory.
9957 * @param u16Value The value to store.
9958 */
9959IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9960{
9961 /* The lazy approach for now... */
9962 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9963 *pu16Dst = u16Value;
9964 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9965}
9966#endif
9967
9968
9969/**
9970 * Stores a data dword.
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u32Value The value to store.
9978 */
9979IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9980{
9981 /* The lazy approach for now... */
9982 uint32_t *pu32Dst;
9983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9984 if (rc == VINF_SUCCESS)
9985 {
9986 *pu32Dst = u32Value;
9987 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9988 }
9989 return rc;
9990}
9991
9992
9993#ifdef IEM_WITH_SETJMP
9994/**
9995 * Stores a data dword, longjmp on error.
9996 *
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param iSegReg The index of the segment register to use for
10000 * this access. The base and limits are checked.
10001 * @param GCPtrMem The address of the guest memory.
10002 * @param u32Value The value to store.
10003 */
10004IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10005{
10006 /* The lazy approach for now... */
10007 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10008 *pu32Dst = u32Value;
10009 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10010}
10011#endif
10012
10013
10014/**
10015 * Stores a data qword.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u64Value The value to store.
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10025{
10026 /* The lazy approach for now... */
10027 uint64_t *pu64Dst;
10028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 if (rc == VINF_SUCCESS)
10030 {
10031 *pu64Dst = u64Value;
10032 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10033 }
10034 return rc;
10035}
10036
10037
10038#ifdef IEM_WITH_SETJMP
10039/**
10040 * Stores a data qword, longjmp on error.
10041 *
10042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10043 * @param iSegReg The index of the segment register to use for
10044 * this access. The base and limits are checked.
10045 * @param GCPtrMem The address of the guest memory.
10046 * @param u64Value The value to store.
10047 */
10048IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10049{
10050 /* The lazy approach for now... */
10051 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10052 *pu64Dst = u64Value;
10053 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10054}
10055#endif
10056
10057
10058/**
10059 * Stores a data dqword.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u128Value The value to store.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10069{
10070 /* The lazy approach for now... */
10071 PRTUINT128U pu128Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu128Dst->au64[0] = u128Value.au64[0];
10076 pu128Dst->au64[1] = u128Value.au64[1];
10077 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data dqword, longjmp on error.
10086 *
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param u128Value The value to store.
10092 */
10093IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10094{
10095 /* The lazy approach for now... */
10096 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 pu128Dst->au64[0] = u128Value.au64[0];
10098 pu128Dst->au64[1] = u128Value.au64[1];
10099 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10100}
10101#endif
10102
10103
10104/**
10105 * Stores a data dqword, SSE aligned.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10115{
10116 /* The lazy approach for now... */
10117 if ( (GCPtrMem & 15)
10118 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10119 return iemRaiseGeneralProtectionFault0(pVCpu);
10120
10121 PRTUINT128U pu128Dst;
10122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10123 if (rc == VINF_SUCCESS)
10124 {
10125 pu128Dst->au64[0] = u128Value.au64[0];
10126 pu128Dst->au64[1] = u128Value.au64[1];
10127 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10128 }
10129 return rc;
10130}
10131
10132
10133#ifdef IEM_WITH_SETJMP
10134/**
10135 * Stores a data dqword, SSE aligned, longjmp on error.
10136 *
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param u128Value The value to store.
10143 */
10144DECL_NO_INLINE(IEM_STATIC, void)
10145iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10146{
10147 /* The lazy approach for now... */
10148 if ( (GCPtrMem & 15) == 0
10149 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10150 {
10151 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10152 pu128Dst->au64[0] = u128Value.au64[0];
10153 pu128Dst->au64[1] = u128Value.au64[1];
10154 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10155 return;
10156 }
10157
10158 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10160}
10161#endif
10162
10163
10164/**
10165 * Stores a data oword (octo word).
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param pu256Value Pointer to the value to store.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10175{
10176 /* The lazy approach for now... */
10177 PRTUINT256U pu256Dst;
10178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10179 if (rc == VINF_SUCCESS)
10180 {
10181 pu256Dst->au64[0] = pu256Value->au64[0];
10182 pu256Dst->au64[1] = pu256Value->au64[1];
10183 pu256Dst->au64[2] = pu256Value->au64[2];
10184 pu256Dst->au64[3] = pu256Value->au64[3];
10185 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10186 }
10187 return rc;
10188}
10189
10190
10191#ifdef IEM_WITH_SETJMP
10192/**
10193 * Stores a data oword (octo word), longjmp on error.
10194 *
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 * @param pu256Value Pointer to the value to store.
10200 */
10201IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10205 pu256Dst->au64[0] = pu256Value->au64[0];
10206 pu256Dst->au64[1] = pu256Value->au64[1];
10207 pu256Dst->au64[2] = pu256Value->au64[2];
10208 pu256Dst->au64[3] = pu256Value->au64[3];
10209 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10210}
10211#endif
10212
10213
10214/**
10215 * Stores a data oword (octo word), AVX aligned.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10219 * @param iSegReg The index of the segment register to use for
10220 * this access. The base and limits are checked.
10221 * @param GCPtrMem The address of the guest memory.
10222 * @param pu256Value Pointer to the value to store.
10223 */
10224IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10225{
10226 /* The lazy approach for now... */
10227 if (GCPtrMem & 31)
10228 return iemRaiseGeneralProtectionFault0(pVCpu);
10229
10230 PRTUINT256U pu256Dst;
10231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10232 if (rc == VINF_SUCCESS)
10233 {
10234 pu256Dst->au64[0] = pu256Value->au64[0];
10235 pu256Dst->au64[1] = pu256Value->au64[1];
10236 pu256Dst->au64[2] = pu256Value->au64[2];
10237 pu256Dst->au64[3] = pu256Value->au64[3];
10238 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10239 }
10240 return rc;
10241}
10242
10243
10244#ifdef IEM_WITH_SETJMP
10245/**
10246 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10247 *
10249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10250 * @param iSegReg The index of the segment register to use for
10251 * this access. The base and limits are checked.
10252 * @param GCPtrMem The address of the guest memory.
10253 * @param pu256Value Pointer to the value to store.
10254 */
10255DECL_NO_INLINE(IEM_STATIC, void)
10256iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10257{
10258 /* The lazy approach for now... */
10259 if ((GCPtrMem & 31) == 0)
10260 {
10261 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10262 pu256Dst->au64[0] = pu256Value->au64[0];
10263 pu256Dst->au64[1] = pu256Value->au64[1];
10264 pu256Dst->au64[2] = pu256Value->au64[2];
10265 pu256Dst->au64[3] = pu256Value->au64[3];
10266 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10267 return;
10268 }
10269
10270 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10271 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10272}
10273#endif
10274
10275
10276/**
10277 * Stores a descriptor register (sgdt, sidt).
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10281 * @param cbLimit The limit.
10282 * @param GCPtrBase The base address.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 */
10287IEM_STATIC VBOXSTRICTRC
10288iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10289{
10290 /*
10291 * The SIDT and SGDT instructions actually store the data using two
10292 * independent writes. The instructions do not respond to opsize prefixes.
10293 */
10294 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10295 if (rcStrict == VINF_SUCCESS)
10296 {
10297 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10298 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10299 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10300 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10301 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10302 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10303 else
10304 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10305 }
10306 return rcStrict;
10307}
10308
10309
10310/**
10311 * Pushes a word onto the stack.
10312 *
10313 * @returns Strict VBox status code.
10314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10315 * @param u16Value The value to push.
10316 */
10317IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10318{
10319 /* Decrement the stack pointer. */
10320 uint64_t uNewRsp;
10321 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10322
10323 /* Write the word the lazy way. */
10324 uint16_t *pu16Dst;
10325 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10326 if (rc == VINF_SUCCESS)
10327 {
10328 *pu16Dst = u16Value;
10329 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10330 }
10331
10332 /* Commit the new RSP value unless an access handler made trouble. */
10333 if (rc == VINF_SUCCESS)
10334 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10335
10336 return rc;
10337}
10338
10339
10340/**
10341 * Pushes a dword onto the stack.
10342 *
10343 * @returns Strict VBox status code.
10344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10345 * @param u32Value The value to push.
10346 */
10347IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10348{
10349 /* Decrement the stack pointer. */
10350 uint64_t uNewRsp;
10351 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10352
10353 /* Write the dword the lazy way. */
10354 uint32_t *pu32Dst;
10355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10356 if (rc == VINF_SUCCESS)
10357 {
10358 *pu32Dst = u32Value;
10359 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10360 }
10361
10362 /* Commit the new RSP value unless an access handler made trouble. */
10363 if (rc == VINF_SUCCESS)
10364 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10365
10366 return rc;
10367}
10368
10369
10370/**
10371 * Pushes a dword segment register value onto the stack.
10372 *
10373 * @returns Strict VBox status code.
10374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10375 * @param u32Value The value to push.
10376 */
10377IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10378{
10379 /* Decrement the stack pointer. */
10380 uint64_t uNewRsp;
10381 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10382
10383 /* The intel docs talk about zero extending the selector register
10384 value. My actual intel CPU here might be zero extending the value
10385 but it still only writes the lower word... */
10386 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10387 * happens when crossing an electric page boundary: is the high word checked
10388 * for write accessibility or not? Probably it is. What about segment limits?
10389 * It appears this behavior is also shared with trap error codes.
10390 *
10391 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10392 * ancient hardware to see when it actually did change. */
10393 uint16_t *pu16Dst;
10394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10395 if (rc == VINF_SUCCESS)
10396 {
10397 *pu16Dst = (uint16_t)u32Value;
10398 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10399 }
10400
10401 /* Commit the new RSP value unless an access handler made trouble. */
10402 if (rc == VINF_SUCCESS)
10403 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10404
10405 return rc;
10406}
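/*
 * Editorial note: the function above deliberately maps a full dword
 * (sizeof(uint32_t)) for read+write even though it only stores the low
 * word, so the stack limit/accessibility checks cover the whole 32-bit
 * slot while the bytes above the selector are left untouched when the
 * mapping is committed.
 */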
10407
10408
10409/**
10410 * Pushes a qword onto the stack.
10411 *
10412 * @returns Strict VBox status code.
10413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10414 * @param u64Value The value to push.
10415 */
10416IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10417{
10418 /* Decrement the stack pointer. */
10419 uint64_t uNewRsp;
10420 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10421
10422 /* Write the qword the lazy way. */
10423 uint64_t *pu64Dst;
10424 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10425 if (rc == VINF_SUCCESS)
10426 {
10427 *pu64Dst = u64Value;
10428 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10429 }
10430
10431 /* Commit the new RSP value unless an access handler made trouble. */
10432 if (rc == VINF_SUCCESS)
10433 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10434
10435 return rc;
10436}
10437
10438
10439/**
10440 * Pops a word from the stack.
10441 *
10442 * @returns Strict VBox status code.
10443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10444 * @param pu16Value Where to store the popped value.
10445 */
10446IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10447{
10448 /* Increment the stack pointer. */
10449 uint64_t uNewRsp;
10450 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10451
10452 /* Fetch the word the lazy way. */
10453 uint16_t const *pu16Src;
10454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10455 if (rc == VINF_SUCCESS)
10456 {
10457 *pu16Value = *pu16Src;
10458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10459
10460 /* Commit the new RSP value. */
10461 if (rc == VINF_SUCCESS)
10462 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10463 }
10464
10465 return rc;
10466}
10467
10468
10469/**
10470 * Pops a dword from the stack.
10471 *
10472 * @returns Strict VBox status code.
10473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10474 * @param pu32Value Where to store the popped value.
10475 */
10476IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10477{
10478 /* Increment the stack pointer. */
10479 uint64_t uNewRsp;
10480 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10481
10482 /* Fetch the dword the lazy way. */
10483 uint32_t const *pu32Src;
10484 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10485 if (rc == VINF_SUCCESS)
10486 {
10487 *pu32Value = *pu32Src;
10488 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10489
10490 /* Commit the new RSP value. */
10491 if (rc == VINF_SUCCESS)
10492 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10493 }
10494
10495 return rc;
10496}
10497
10498
10499/**
10500 * Pops a qword from the stack.
10501 *
10502 * @returns Strict VBox status code.
10503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10504 * @param pu64Value Where to store the popped value.
10505 */
10506IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10507{
10508 /* Increment the stack pointer. */
10509 uint64_t uNewRsp;
10510 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10511
10512 /* Fetch the qword the lazy way. */
10513 uint64_t const *pu64Src;
10514 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10515 if (rc == VINF_SUCCESS)
10516 {
10517 *pu64Value = *pu64Src;
10518 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10519
10520 /* Commit the new RSP value. */
10521 if (rc == VINF_SUCCESS)
10522 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10523 }
10524
10525 return rc;
10526}
10527
10528
10529/**
10530 * Pushes a word onto the stack, using a temporary stack pointer.
10531 *
10532 * @returns Strict VBox status code.
10533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10534 * @param u16Value The value to push.
10535 * @param pTmpRsp Pointer to the temporary stack pointer.
10536 */
10537IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10538{
10539 /* Decrement the stack pointer. */
10540 RTUINT64U NewRsp = *pTmpRsp;
10541 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10542
10543 /* Write the word the lazy way. */
10544 uint16_t *pu16Dst;
10545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10546 if (rc == VINF_SUCCESS)
10547 {
10548 *pu16Dst = u16Value;
10549 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10550 }
10551
10552 /* Commit the new RSP value unless an access handler made trouble. */
10553 if (rc == VINF_SUCCESS)
10554 *pTmpRsp = NewRsp;
10555
10556 return rc;
10557}
10558
10559
10560/**
10561 * Pushes a dword onto the stack, using a temporary stack pointer.
10562 *
10563 * @returns Strict VBox status code.
10564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10565 * @param u32Value The value to push.
10566 * @param pTmpRsp Pointer to the temporary stack pointer.
10567 */
10568IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10569{
10570 /* Decrement the stack pointer. */
10571 RTUINT64U NewRsp = *pTmpRsp;
10572 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10573
10574 /* Write the dword the lazy way. */
10575 uint32_t *pu32Dst;
10576 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10577 if (rc == VINF_SUCCESS)
10578 {
10579 *pu32Dst = u32Value;
10580 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10581 }
10582
10583 /* Commit the new RSP value unless an access handler made trouble. */
10584 if (rc == VINF_SUCCESS)
10585 *pTmpRsp = NewRsp;
10586
10587 return rc;
10588}
10589
10590
10591/**
10592 * Pushes a qword onto the stack, using a temporary stack pointer.
10593 *
10594 * @returns Strict VBox status code.
10595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10596 * @param u64Value The value to push.
10597 * @param pTmpRsp Pointer to the temporary stack pointer.
10598 */
10599IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10600{
10601 /* Decrement the stack pointer. */
10602 RTUINT64U NewRsp = *pTmpRsp;
10603 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10604
10605 /* Write the qword the lazy way. */
10606 uint64_t *pu64Dst;
10607 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10608 if (rc == VINF_SUCCESS)
10609 {
10610 *pu64Dst = u64Value;
10611 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10612 }
10613
10614 /* Commit the new RSP value unless an access handler made trouble. */
10615 if (rc == VINF_SUCCESS)
10616 *pTmpRsp = NewRsp;
10617
10618 return rc;
10619}
10620
10621
10622/**
10623 * Pops a word from the stack, using a temporary stack pointer.
10624 *
10625 * @returns Strict VBox status code.
10626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10627 * @param pu16Value Where to store the popped value.
10628 * @param pTmpRsp Pointer to the temporary stack pointer.
10629 */
10630IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10631{
10632 /* Increment the stack pointer. */
10633 RTUINT64U NewRsp = *pTmpRsp;
10634 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10635
10636 /* Fetch the word the lazy way. */
10637 uint16_t const *pu16Src;
10638 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10639 if (rc == VINF_SUCCESS)
10640 {
10641 *pu16Value = *pu16Src;
10642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10643
10644 /* Commit the new RSP value. */
10645 if (rc == VINF_SUCCESS)
10646 *pTmpRsp = NewRsp;
10647 }
10648
10649 return rc;
10650}
10651
10652
10653/**
10654 * Pops a dword from the stack, using a temporary stack pointer.
10655 *
10656 * @returns Strict VBox status code.
10657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10658 * @param pu32Value Where to store the popped value.
10659 * @param pTmpRsp Pointer to the temporary stack pointer.
10660 */
10661IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10662{
10663 /* Increment the stack pointer. */
10664 RTUINT64U NewRsp = *pTmpRsp;
10665 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10666
10667 /* Fetch the dword the lazy way. */
10668 uint32_t const *pu32Src;
10669 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10670 if (rc == VINF_SUCCESS)
10671 {
10672 *pu32Value = *pu32Src;
10673 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10674
10675 /* Commit the new RSP value. */
10676 if (rc == VINF_SUCCESS)
10677 *pTmpRsp = NewRsp;
10678 }
10679
10680 return rc;
10681}
10682
10683
10684/**
10685 * Pops a qword from the stack, using a temporary stack pointer.
10686 *
10687 * @returns Strict VBox status code.
10688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10689 * @param pu64Value Where to store the popped value.
10690 * @param pTmpRsp Pointer to the temporary stack pointer.
10691 */
10692IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10693{
10694 /* Increment the stack pointer. */
10695 RTUINT64U NewRsp = *pTmpRsp;
10696 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10697
10698 /* Fetch the qword the lazy way. */
10699 uint64_t const *pu64Src;
10700 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10701 if (rcStrict == VINF_SUCCESS)
10702 {
10703 *pu64Value = *pu64Src;
10704 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10705
10706 /* Commit the new RSP value. */
10707 if (rcStrict == VINF_SUCCESS)
10708 *pTmpRsp = NewRsp;
10709 }
10710
10711 return rcStrict;
10712}
10713
10714
10715/**
10716 * Begin a special stack push (used by interrupts, exceptions and such).
10717 *
10718 * This will raise \#SS or \#PF if appropriate.
10719 *
10720 * @returns Strict VBox status code.
10721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10722 * @param cbMem The number of bytes to push onto the stack.
10723 * @param ppvMem Where to return the pointer to the stack memory.
10724 * As with the other memory functions this could be
10725 * direct access or bounce buffered access, so
10726 * don't commit register until the commit call
10727 * succeeds.
10728 * @param puNewRsp Where to return the new RSP value. This must be
10729 * passed unchanged to
10730 * iemMemStackPushCommitSpecial().
10731 */
10732IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10733{
10734 Assert(cbMem < UINT8_MAX);
10735 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10736 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10737}
10738
10739
10740/**
10741 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10742 *
10743 * This will update the rSP.
10744 *
10745 * @returns Strict VBox status code.
10746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10747 * @param pvMem The pointer returned by
10748 * iemMemStackPushBeginSpecial().
10749 * @param uNewRsp The new RSP value returned by
10750 * iemMemStackPushBeginSpecial().
10751 */
10752IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10753{
10754 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10755 if (rcStrict == VINF_SUCCESS)
10756 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10757 return rcStrict;
10758}
10759
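/*
 * A minimal usage sketch for the push begin/commit pair above, assuming a
 * hypothetical caller that needs to push a 16-bit error code uErrCd:
 *
 *     uint16_t *pu16Frame;
 *     uint64_t uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(*pu16Frame), (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     *pu16Frame = uErrCd; // write via the mapped (possibly bounce buffered) frame
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp); // unmaps and commits the new RSP
 */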
10760
10761/**
10762 * Begin a special stack pop (used by iret, retf and such).
10763 *
10764 * This will raise \#SS or \#PF if appropriate.
10765 *
10766 * @returns Strict VBox status code.
10767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10768 * @param cbMem The number of bytes to pop from the stack.
10769 * @param ppvMem Where to return the pointer to the stack memory.
10770 * @param puNewRsp Where to return the new RSP value. This must be
10771 * assigned to CPUMCTX::rsp manually some time
10772 * after iemMemStackPopDoneSpecial() has been
10773 * called.
10774 */
10775IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10776{
10777 Assert(cbMem < UINT8_MAX);
10778 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10779 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10780}
10781
10782
10783/**
10784 * Continue a special stack pop (used by iret and retf).
10785 *
10786 * This will raise \#SS or \#PF if appropriate.
10787 *
10788 * @returns Strict VBox status code.
10789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10790 * @param cbMem The number of bytes to pop from the stack.
10791 * @param ppvMem Where to return the pointer to the stack memory.
10792 * @param puNewRsp Where to return the new RSP value. This must be
10793 * assigned to CPUMCTX::rsp manually some time
10794 * after iemMemStackPopDoneSpecial() has been
10795 * called.
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10798{
10799 Assert(cbMem < UINT8_MAX);
10800 RTUINT64U NewRsp;
10801 NewRsp.u = *puNewRsp;
10802 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10803 *puNewRsp = NewRsp.u;
10804 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10805}
10806
10807
10808/**
10809 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10810 * iemMemStackPopContinueSpecial).
10811 *
10812 * The caller will manually commit the rSP.
10813 *
10814 * @returns Strict VBox status code.
10815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10816 * @param pvMem The pointer returned by
10817 * iemMemStackPopBeginSpecial() or
10818 * iemMemStackPopContinueSpecial().
10819 */
10820IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10821{
10822 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10823}
10824
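/*
 * A minimal usage sketch for the special stack pop protocol above, assuming a
 * hypothetical caller popping a 64-bit return address; RSP is only committed
 * manually after iemMemStackPopDoneSpecial() has succeeded:
 *
 *     uint64_t const *pu64Frame;
 *     uint64_t uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint64_t const uNewRip = *pu64Frame;
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rip = uNewRip;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */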
10825
10826/**
10827 * Fetches a system table byte.
10828 *
10829 * @returns Strict VBox status code.
10830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10831 * @param pbDst Where to return the byte.
10832 * @param iSegReg The index of the segment register to use for
10833 * this access. The base and limits are checked.
10834 * @param GCPtrMem The address of the guest memory.
10835 */
10836IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10837{
10838 /* The lazy approach for now... */
10839 uint8_t const *pbSrc;
10840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10841 if (rc == VINF_SUCCESS)
10842 {
10843 *pbDst = *pbSrc;
10844 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10845 }
10846 return rc;
10847}
10848
10849
10850/**
10851 * Fetches a system table word.
10852 *
10853 * @returns Strict VBox status code.
10854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10855 * @param pu16Dst Where to return the word.
10856 * @param iSegReg The index of the segment register to use for
10857 * this access. The base and limits are checked.
10858 * @param GCPtrMem The address of the guest memory.
10859 */
10860IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10861{
10862 /* The lazy approach for now... */
10863 uint16_t const *pu16Src;
10864 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10865 if (rc == VINF_SUCCESS)
10866 {
10867 *pu16Dst = *pu16Src;
10868 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10869 }
10870 return rc;
10871}
10872
10873
10874/**
10875 * Fetches a system table dword.
10876 *
10877 * @returns Strict VBox status code.
10878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10879 * @param pu32Dst Where to return the dword.
10880 * @param iSegReg The index of the segment register to use for
10881 * this access. The base and limits are checked.
10882 * @param GCPtrMem The address of the guest memory.
10883 */
10884IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10885{
10886 /* The lazy approach for now... */
10887 uint32_t const *pu32Src;
10888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10889 if (rc == VINF_SUCCESS)
10890 {
10891 *pu32Dst = *pu32Src;
10892 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10893 }
10894 return rc;
10895}
10896
10897
10898/**
10899 * Fetches a system table qword.
10900 *
10901 * @returns Strict VBox status code.
10902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10903 * @param pu64Dst Where to return the qword.
10904 * @param iSegReg The index of the segment register to use for
10905 * this access. The base and limits are checked.
10906 * @param GCPtrMem The address of the guest memory.
10907 */
10908IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10909{
10910 /* The lazy approach for now... */
10911 uint64_t const *pu64Src;
10912 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10913 if (rc == VINF_SUCCESS)
10914 {
10915 *pu64Dst = *pu64Src;
10916 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10917 }
10918 return rc;
10919}
10920
10921
10922/**
10923 * Fetches a descriptor table entry with caller specified error code.
10924 *
10925 * @returns Strict VBox status code.
10926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10927 * @param pDesc Where to return the descriptor table entry.
10928 * @param uSel The selector which table entry to fetch.
10929 * @param uXcpt The exception to raise on table lookup error.
10930 * @param uErrorCode The error code associated with the exception.
10931 */
10932IEM_STATIC VBOXSTRICTRC
10933iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10934{
10935 AssertPtr(pDesc);
10936 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10937
10938 /** @todo did the 286 require all 8 bytes to be accessible? */
10939 /*
10940 * Get the selector table base and check bounds.
10941 */
10942 RTGCPTR GCPtrBase;
10943 if (uSel & X86_SEL_LDT)
10944 {
10945 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10946 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10947 {
10948 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10949 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10950 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10951 uErrorCode, 0);
10952 }
10953
10954 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10955 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10956 }
10957 else
10958 {
10959 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10960 {
10961 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10962 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10963 uErrorCode, 0);
10964 }
10965 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10966 }
10967
10968 /*
10969 * Read the legacy descriptor and maybe the long mode extensions if
10970 * required.
10971 */
10972 VBOXSTRICTRC rcStrict;
10973 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10974 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10975 else
10976 {
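/* Emulating an 80286: only the first three words of the descriptor are fetched, the reserved top word is simply zeroed. */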
10977 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10978 if (rcStrict == VINF_SUCCESS)
10979 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10980 if (rcStrict == VINF_SUCCESS)
10981 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10982 if (rcStrict == VINF_SUCCESS)
10983 pDesc->Legacy.au16[3] = 0;
10984 else
10985 return rcStrict;
10986 }
10987
10988 if (rcStrict == VINF_SUCCESS)
10989 {
10990 if ( !IEM_IS_LONG_MODE(pVCpu)
10991 || pDesc->Legacy.Gen.u1DescType)
10992 pDesc->Long.au64[1] = 0;
10993 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
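/* Note: (uSel | X86_SEL_RPL_LDT) + 1 == (uSel & X86_SEL_MASK) + 8, i.e. the offset of the descriptor's upper 8 bytes. */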
10994 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10995 else
10996 {
10997 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10998 /** @todo is this the right exception? */
10999 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11000 }
11001 }
11002 return rcStrict;
11003}
11004
11005
11006/**
11007 * Fetches a descriptor table entry.
11008 *
11009 * @returns Strict VBox status code.
11010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11011 * @param pDesc Where to return the descriptor table entry.
11012 * @param uSel The selector which table entry to fetch.
11013 * @param uXcpt The exception to raise on table lookup error.
11014 */
11015IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11016{
11017 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11018}
11019
11020
11021/**
11022 * Fakes a long mode stack selector for SS = 0.
11023 *
11024 * @param pDescSs Where to return the fake stack descriptor.
11025 * @param uDpl The DPL we want.
11026 */
11027IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11028{
11029 pDescSs->Long.au64[0] = 0;
11030 pDescSs->Long.au64[1] = 0;
11031 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11032 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11033 pDescSs->Long.Gen.u2Dpl = uDpl;
11034 pDescSs->Long.Gen.u1Present = 1;
11035 pDescSs->Long.Gen.u1Long = 1;
11036}
11037
11038
11039/**
11040 * Marks the selector descriptor as accessed (only non-system descriptors).
11041 *
11042 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11043 * will therefore skip the limit checks.
11044 *
11045 * @returns Strict VBox status code.
11046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11047 * @param uSel The selector.
11048 */
11049IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11050{
11051 /*
11052 * Get the selector table base and calculate the entry address.
11053 */
11054 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11055 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11056 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11057 GCPtr += uSel & X86_SEL_MASK;
11058
11059 /*
11060 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11061 * ugly stuff to avoid this. This also makes sure the access is atomic and
11062 * more or less removes any question about 8-bit vs 32-bit accesses.
11063 */
11064 VBOXSTRICTRC rcStrict;
11065 uint32_t volatile *pu32;
11066 if ((GCPtr & 3) == 0)
11067 {
11068 /* The normal case, map the 32 bits containing the accessed bit (bit 40). */
11069 GCPtr += 2 + 2;
11070 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11071 if (rcStrict != VINF_SUCCESS)
11072 return rcStrict;
11073 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11074 }
11075 else
11076 {
11077 /* The misaligned GDT/LDT case, map the whole thing. */
11078 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11079 if (rcStrict != VINF_SUCCESS)
11080 return rcStrict;
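/* Re-base to an address ASMAtomicBitSet will accept as aligned and adjust the bit index of the accessed bit (descriptor bit 40) to match. */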
11081 switch ((uintptr_t)pu32 & 3)
11082 {
11083 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11084 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11085 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11086 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11087 }
11088 }
11089
11090 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11091}
11092
11093/** @} */
11094
11095
11096/*
11097 * Include the C/C++ implementation of the instructions.
11098 */
11099#include "IEMAllCImpl.cpp.h"
11100
11101
11102
11103/** @name "Microcode" macros.
11104 *
11105 * The idea is that we should be able to use the same code to interpret
11106 * instructions as well as to recompile them. Thus this obfuscation.
11107 *
11108 * @{
11109 */
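/*
 * A minimal sketch of how these IEM_MC_XXX statements compose into an
 * instruction body, here a simplified register-to-register 32-bit move
 * (iRegDst and iRegSrc are hypothetical locals provided by the decoder):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_FETCH_GREG_U32(u32Value, iRegSrc);
 *     IEM_MC_STORE_GREG_U32(iRegDst, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */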
11110#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11111#define IEM_MC_END() }
11112#define IEM_MC_PAUSE() do {} while (0)
11113#define IEM_MC_CONTINUE() do {} while (0)
11114
11115/** Internal macro. */
11116#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11117 do \
11118 { \
11119 VBOXSTRICTRC rcStrict2 = a_Expr; \
11120 if (rcStrict2 != VINF_SUCCESS) \
11121 return rcStrict2; \
11122 } while (0)
11123
11124
11125#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11126#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11127#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11128#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11129#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11130#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11131#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11132#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11133#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11134 do { \
11135 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11136 return iemRaiseDeviceNotAvailable(pVCpu); \
11137 } while (0)
11138#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11139 do { \
11140 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11141 return iemRaiseDeviceNotAvailable(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11144 do { \
11145 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11146 return iemRaiseMathFault(pVCpu); \
11147 } while (0)
11148#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11149 do { \
11150 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11151 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11152 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11153 return iemRaiseUndefinedOpcode(pVCpu); \
11154 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11155 return iemRaiseDeviceNotAvailable(pVCpu); \
11156 } while (0)
11157#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11158 do { \
11159 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11160 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11161 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11162 return iemRaiseUndefinedOpcode(pVCpu); \
11163 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11164 return iemRaiseDeviceNotAvailable(pVCpu); \
11165 } while (0)
11166#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11167 do { \
11168 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11169 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11170 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11171 return iemRaiseUndefinedOpcode(pVCpu); \
11172 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11173 return iemRaiseDeviceNotAvailable(pVCpu); \
11174 } while (0)
11175#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11176 do { \
11177 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11178 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11179 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11180 return iemRaiseUndefinedOpcode(pVCpu); \
11181 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11182 return iemRaiseDeviceNotAvailable(pVCpu); \
11183 } while (0)
11184#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11185 do { \
11186 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11187 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11188 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11189 return iemRaiseUndefinedOpcode(pVCpu); \
11190 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11191 return iemRaiseDeviceNotAvailable(pVCpu); \
11192 } while (0)
11193#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11194 do { \
11195 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11196 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11197 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11198 return iemRaiseUndefinedOpcode(pVCpu); \
11199 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11200 return iemRaiseDeviceNotAvailable(pVCpu); \
11201 } while (0)
11202#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11203 do { \
11204 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11205 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11206 return iemRaiseUndefinedOpcode(pVCpu); \
11207 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11208 return iemRaiseDeviceNotAvailable(pVCpu); \
11209 } while (0)
11210#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11211 do { \
11212 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11213 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11214 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11215 return iemRaiseUndefinedOpcode(pVCpu); \
11216 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11217 return iemRaiseDeviceNotAvailable(pVCpu); \
11218 } while (0)
11219#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11220 do { \
11221 if (pVCpu->iem.s.uCpl != 0) \
11222 return iemRaiseGeneralProtectionFault0(pVCpu); \
11223 } while (0)
11224#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11225 do { \
11226 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11227 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11228 } while (0)
11229#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11230 do { \
11231 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11232 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11233 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11234 return iemRaiseUndefinedOpcode(pVCpu); \
11235 } while (0)
11236#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11237 do { \
11238 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11239 return iemRaiseGeneralProtectionFault0(pVCpu); \
11240 } while (0)
11241
11242
11243#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11244#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11245#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11246#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11247#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11248#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11249#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11250 uint32_t a_Name; \
11251 uint32_t *a_pName = &a_Name
11252#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11253 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11254
11255#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11256#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11257
11258#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11268#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11269#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11270#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11271#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11272#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11273#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11274#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11275#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11276 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11277 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11278 } while (0)
11279#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11280 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11281 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11282 } while (0)
11283#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11284 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11285 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11286 } while (0)
11287/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11288#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11289 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11290 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11291 } while (0)
11292#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11293 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11294 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11295 } while (0)
11296/** @note Not for IOPL or IF testing or modification. */
11297#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11298#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11299#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11300#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11301
11302#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11303#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11304#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11305#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11306#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11307#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11308#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11309#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11310#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11311#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11312/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11313#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11314 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11315 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11316 } while (0)
11317#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11318 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11319 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11320 } while (0)
11321#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11322 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11323
11324
11325#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11326#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11327/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11328 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11329#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11330#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11331/** @note Not for IOPL or IF testing or modification. */
11332#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11333
11334#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11335#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11336#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11337 do { \
11338 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11339 *pu32Reg += (a_u32Value); \
11340 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11341 } while (0)
11342#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11343
11344#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11345#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11346#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11347 do { \
11348 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11349 *pu32Reg -= (a_u32Value); \
11350 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11351 } while (0)
11352#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11353#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11354
11355#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11356#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11357#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11358#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11359#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11360#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11361#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11362
11363#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11364#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11365#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11366#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11367
11368#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11369#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11370#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11371
11372#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11373#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11374#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11375
11376#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11377#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11378#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11379
11380#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11381#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11382#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11383
11384#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11385
11386#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11387
11388#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11389#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11390#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11391 do { \
11392 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11393 *pu32Reg &= (a_u32Value); \
11394 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11395 } while (0)
11396#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11397
11398#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11399#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11400#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11401 do { \
11402 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11403 *pu32Reg |= (a_u32Value); \
11404 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11405 } while (0)
11406#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11407
11408
11409/** @note Not for IOPL or IF modification. */
11410#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11411/** @note Not for IOPL or IF modification. */
11412#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11413/** @note Not for IOPL or IF modification. */
11414#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11415
11416#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11417
11418/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0, i.e. abridged tag 0xff = all registers valid) if necessary. */
11419#define IEM_MC_FPU_TO_MMX_MODE() do { \
11420 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11421 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11422 } while (0)
11423
11424/** Switches the FPU state from MMX mode (FTW=0xffff, i.e. abridged tag 0 = all registers empty). */
11425#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11426 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11427 } while (0)
11428
11429#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11430 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11431#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11432 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11433#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11434 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11435 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11436 } while (0)
11437#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11438 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11439 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11440 } while (0)
11441#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11442 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11443#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11444 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11445#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11446 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11447
11448#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11449 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11450 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11451 } while (0)
11452#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11453 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11454#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11455 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11456#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11457 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11458#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11459 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11460 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11461 } while (0)
11462#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11463 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11464#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11465 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11466 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11467 } while (0)
11468#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11469 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11470#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11471 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11472 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11473 } while (0)
11474#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11475 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11476#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11477 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11478#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11479 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11480#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11481 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11482#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11483 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11484 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11485 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11486 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11487 } while (0)
11488
11489#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11490 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11491 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11492 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11493 } while (0)
11494#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11495 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11496 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11497 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11498 } while (0)
11499#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11500 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11501 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11502 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11503 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11504 } while (0)
11505#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11506 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11507 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11508 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11509 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11510 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11511 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11512 } while (0)
11513
11514#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11515#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11516 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11517 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11519 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11520 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11522 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11523 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11524 } while (0)
11525#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11526 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11527 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11530 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11533 } while (0)
11534#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11535 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11536 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11539 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11540 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11541 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11542 } while (0)
11543#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11544 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11545 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11546 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11547 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11548 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11549 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11550 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11551 } while (0)
11552
11553#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11554 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11555#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11556 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11557#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11558 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11559#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11560 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11561 uintptr_t const iYRegTmp = (a_iYReg); \
11562 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11563 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11564 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11565 } while (0)
11566
11567#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11568 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11569 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11570 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11571 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11573 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11575 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11576 } while (0)
11577#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11578 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11579 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11580 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11581 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11585 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11586 } while (0)
11587#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11588 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11589 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11590 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11591 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11592 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11593 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11594 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11595 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11596 } while (0)
11597
11598#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11599 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11600 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11601 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11602 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11603 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11604 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11605 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11606 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11607 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11608 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11609 } while (0)
11610#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11611 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11612 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11613 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11614 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11615 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11617 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11618 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11619 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11620 } while (0)
11621#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11622 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11623 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11624 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11625 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11626 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11628 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11630 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11631 } while (0)
11632#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11633 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11634 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11635 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11636 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11637 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11638 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11639 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11640 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11641 } while (0)
11642
11643#ifndef IEM_WITH_SETJMP
11644# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11645 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11648# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11650#else
11651# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11652 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11654 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11655# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11656 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11657#endif
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11664# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11666#else
11667# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11668 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11670 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11671# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11680# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11682#else
11683# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11684 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11686 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11687# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689#endif
11690
11691#ifdef SOME_UNUSED_FUNCTION
11692# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11694#endif
11695
11696#ifndef IEM_WITH_SETJMP
11697# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11705#else
11706# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11707 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11709 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11710# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714#endif
11715
11716#ifndef IEM_WITH_SETJMP
11717# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11723#else
11724# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11725 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11729 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11730#endif
11731
11732#ifndef IEM_WITH_SETJMP
11733# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11735# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11737#else
11738# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11739 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11740# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11741 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11742#endif
11743
11744#ifndef IEM_WITH_SETJMP
11745# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11749#else
11750# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11751 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11752# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11753 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11754#endif
11755
11756
11757
11758#ifndef IEM_WITH_SETJMP
11759# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11760 do { \
11761 uint8_t u8Tmp; \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11763 (a_u16Dst) = u8Tmp; \
11764 } while (0)
11765# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u32Dst) = u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint8_t u8Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u64Dst) = u8Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint16_t u16Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u32Dst) = u16Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint16_t u16Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u64Dst) = u16Tmp; \
11788 } while (0)
11789# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11790 do { \
11791 uint32_t u32Tmp; \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11793 (a_u64Dst) = u32Tmp; \
11794 } while (0)
11795#else /* IEM_WITH_SETJMP */
11796# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11797 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11798# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808#endif /* IEM_WITH_SETJMP */
11809
11810#ifndef IEM_WITH_SETJMP
11811# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint8_t u8Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u16Dst) = (int8_t)u8Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint8_t u8Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u32Dst) = (int8_t)u8Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint8_t u8Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u64Dst) = (int8_t)u8Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint16_t u16Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u32Dst) = (int16_t)u16Tmp; \
11834 } while (0)
11835# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 do { \
11837 uint16_t u16Tmp; \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11839 (a_u64Dst) = (int16_t)u16Tmp; \
11840 } while (0)
11841# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 do { \
11843 uint32_t u32Tmp; \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11845 (a_u64Dst) = (int32_t)u32Tmp; \
11846 } while (0)
11847#else /* IEM_WITH_SETJMP */
11848# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11857 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11858# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11859 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11860#endif /* IEM_WITH_SETJMP */
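/*
 * Usage sketch (illustrative only): inside a hypothetical ModR/M opcode worker
 * (bRm already fetched), a sign-extending load from memory into a 64-bit
 * register would combine the macros above roughly like this.  The destination
 * register is a made-up constant; real workers derive it from the ModR/M byte.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint64_t, u64Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Value); // illustrative fixed destination
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */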
11861
11862#ifndef IEM_WITH_SETJMP
11863# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11865# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11867# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11869# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11871#else
11872# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11873 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11874# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11875 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11876# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11877 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11878# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11879 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11880#endif
11881
11882#ifndef IEM_WITH_SETJMP
11883# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11885# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11887# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11889# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11891#else
11892# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11893 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11894# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11895 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11896# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11897 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11898# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11899 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11900#endif
11901
11902#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11903#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11904#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11905#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11906#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11907#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11908#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11909 do { \
11910 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11911 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11912 } while (0)
11913
11914#ifndef IEM_WITH_SETJMP
11915# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11917# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11919#else
11920# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11921 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11922# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11923 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11924#endif
11925
11926#ifndef IEM_WITH_SETJMP
11927# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11928 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11929# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11931#else
11932# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11933 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11934# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11935 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11936#endif
11937
11938
11939#define IEM_MC_PUSH_U16(a_u16Value) \
11940 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11941#define IEM_MC_PUSH_U32(a_u32Value) \
11942 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11943#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11944 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11945#define IEM_MC_PUSH_U64(a_u64Value) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11947
11948#define IEM_MC_POP_U16(a_pu16Value) \
11949 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11950#define IEM_MC_POP_U32(a_pu32Value) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11952#define IEM_MC_POP_U64(a_pu64Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
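/*
 * Usage sketch (illustrative only): a 16-bit push of a general purpose
 * register is simply a fetch followed by IEM_MC_PUSH_U16; the source register
 * below is a made-up constant.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xBX); // illustrative source register
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */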
11954
11955/** Maps guest memory for direct or bounce buffered access.
11956 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11957 * @remarks May return.
11958 */
11959#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11961
11962/** Maps guest memory for direct or bounce buffered access.
11963 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11964 * @remarks May return.
11965 */
11966#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11968
11969/** Commits the memory and unmaps the guest memory.
11970 * @remarks May return.
11971 */
11972#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11974
11975/** Commits the memory and unmaps the guest memory unless the FPU status word
11976 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11977 * would prevent the store from taking place.
11978 *
11979 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11980 * store, while \#P will not.
11981 *
11982 * @remarks May in theory return - for now.
11983 */
11984#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11985 do { \
11986 if ( !(a_u16FSW & X86_FSW_ES) \
11987 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11988 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11990 } while (0)
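/*
 * Usage sketch (illustrative only): the usual read-modify-write pattern for a
 * memory destination pairs IEM_MC_MEM_MAP with IEM_MC_MEM_COMMIT_AND_UNMAP
 * around an assembly worker call.  The worker and the source register are
 * assumptions made for the sake of the example.
 *
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,        pu16Dst,          0);
 *      IEM_MC_ARG(uint16_t,          u16Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(      pEFlags, EFlags,  2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX); // illustrative source register
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags); // assumed worker
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */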
11991
11992/** Calculate effective address from R/M. */
11993#ifndef IEM_WITH_SETJMP
11994# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11995 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11996#else
11997# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11998 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11999#endif
12000
12001#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12002#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12003#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12004#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12005#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12006#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12007#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12008
12009/**
12010 * Defers the rest of the instruction emulation to a C implementation routine
12011 * and returns, only taking the standard parameters.
12012 *
12013 * @param a_pfnCImpl The pointer to the C routine.
12014 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12015 */
12016#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12017
12018/**
12019 * Defers the rest of the instruction emulation to a C implementation routine and
12020 * returns, taking one argument in addition to the standard ones.
12021 *
12022 * @param a_pfnCImpl The pointer to the C routine.
12023 * @param a0 The argument.
12024 */
12025#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12026
12027/**
12028 * Defers the rest of the instruction emulation to a C implementation routine
12029 * and returns, taking two arguments in addition to the standard ones.
12030 *
12031 * @param a_pfnCImpl The pointer to the C routine.
12032 * @param a0 The first extra argument.
12033 * @param a1 The second extra argument.
12034 */
12035#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12036
12037/**
12038 * Defers the rest of the instruction emulation to a C implementation routine
12039 * and returns, taking three arguments in addition to the standard ones.
12040 *
12041 * @param a_pfnCImpl The pointer to the C routine.
12042 * @param a0 The first extra argument.
12043 * @param a1 The second extra argument.
12044 * @param a2 The third extra argument.
12045 */
12046#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12047
12048/**
12049 * Defers the rest of the instruction emulation to a C implementation routine
12050 * and returns, taking four arguments in addition to the standard ones.
12051 *
12052 * @param a_pfnCImpl The pointer to the C routine.
12053 * @param a0 The first extra argument.
12054 * @param a1 The second extra argument.
12055 * @param a2 The third extra argument.
12056 * @param a3 The fourth extra argument.
12057 */
12058#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12059
12060/**
12061 * Defers the rest of the instruction emulation to a C implementation routine
12062 * and returns, taking five arguments in addition to the standard ones.
12063 *
12064 * @param a_pfnCImpl The pointer to the C routine.
12065 * @param a0 The first extra argument.
12066 * @param a1 The second extra argument.
12067 * @param a2 The third extra argument.
12068 * @param a3 The fourth extra argument.
12069 * @param a4 The fifth extra argument.
12070 */
12071#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12072
12073/**
12074 * Defers the entire instruction emulation to a C implementation routine and
12075 * returns, only taking the standard parameters.
12076 *
12077 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12078 *
12079 * @param a_pfnCImpl The pointer to the C routine.
12080 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12081 */
12082#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12083
12084/**
12085 * Defers the entire instruction emulation to a C implementation routine and
12086 * returns, taking one argument in addition to the standard ones.
12087 *
12088 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12089 *
12090 * @param a_pfnCImpl The pointer to the C routine.
12091 * @param a0 The argument.
12092 */
12093#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12094
12095/**
12096 * Defers the entire instruction emulation to a C implementation routine and
12097 * returns, taking two arguments in addition to the standard ones.
12098 *
12099 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12100 *
12101 * @param a_pfnCImpl The pointer to the C routine.
12102 * @param a0 The first extra argument.
12103 * @param a1 The second extra argument.
12104 */
12105#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12106
12107/**
12108 * Defers the entire instruction emulation to a C implementation routine and
12109 * returns, taking three arguments in addition to the standard ones.
12110 *
12111 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12112 *
12113 * @param a_pfnCImpl The pointer to the C routine.
12114 * @param a0 The first extra argument.
12115 * @param a1 The second extra argument.
12116 * @param a2 The third extra argument.
12117 */
12118#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
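/*
 * Usage sketch (illustrative only): an instruction implemented entirely in a C
 * worker defers to it straight from the decoder.  Both names below are
 * hypothetical; real decoders pair this with the appropriate IEMOP_MNEMONIC
 * and prefix checks.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example_worker); // hypothetical C worker
 *      }
 * @endcode
 */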
12119
12120/**
12121 * Calls a FPU assembly implementation taking one visible argument.
12122 *
12123 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12124 * @param a0 The first extra argument.
12125 */
12126#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12127 do { \
12128 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12129 } while (0)
12130
12131/**
12132 * Calls a FPU assembly implementation taking two visible arguments.
12133 *
12134 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12135 * @param a0 The first extra argument.
12136 * @param a1 The second extra argument.
12137 */
12138#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12139 do { \
12140 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12141 } while (0)
12142
12143/**
12144 * Calls a FPU assembly implementation taking three visible arguments.
12145 *
12146 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12147 * @param a0 The first extra argument.
12148 * @param a1 The second extra argument.
12149 * @param a2 The third extra argument.
12150 */
12151#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12152 do { \
12153 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12154 } while (0)
12155
12156#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12157 do { \
12158 (a_FpuData).FSW = (a_FSW); \
12159 (a_FpuData).r80Result = *(a_pr80Value); \
12160 } while (0)
12161
12162/** Pushes FPU result onto the stack. */
12163#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12164 iemFpuPushResult(pVCpu, &a_FpuData)
12165/** Pushes FPU result onto the stack and sets the FPUDP. */
12166#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12167 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12168
12169/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12170#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12171 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12172
12173/** Stores FPU result in a stack register. */
12174#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12175 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12176/** Stores FPU result in a stack register and pops the stack. */
12177#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12178 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12179/** Stores FPU result in a stack register and sets the FPUDP. */
12180#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12181 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12182/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12183 * stack. */
12184#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12185 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
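/*
 * Usage sketch (illustrative only): an ST(0) := ST(0) op ST(i) style
 * instruction ties the AIMPL call and the result macros together roughly like
 * this; the assembly worker name is an assumption and the second operand is
 * hard-coded to ST(1) for brevity.
 *
 * @code
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2); // assumed worker
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */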
12186
12187/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12188#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12189 iemFpuUpdateOpcodeAndIp(pVCpu)
12190/** Free a stack register (for FFREE and FFREEP). */
12191#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12192 iemFpuStackFree(pVCpu, a_iStReg)
12193/** Increment the FPU stack pointer. */
12194#define IEM_MC_FPU_STACK_INC_TOP() \
12195 iemFpuStackIncTop(pVCpu)
12196/** Decrement the FPU stack pointer. */
12197#define IEM_MC_FPU_STACK_DEC_TOP() \
12198 iemFpuStackDecTop(pVCpu)
12199
12200/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12201#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12202 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12203/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12204#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12205 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12206/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12207#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12208 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12209/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12210#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12211 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12212/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12213 * stack. */
12214#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12215 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12216/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12217#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12218 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12219
12220/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12221#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12222 iemFpuStackUnderflow(pVCpu, a_iStDst)
12223/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12224 * stack. */
12225#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12226 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12227/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12228 * FPUDS. */
12229#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12230 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12231/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12232 * FPUDS. Pops stack. */
12233#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12234 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12235/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12236 * stack twice. */
12237#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12238 iemFpuStackUnderflowThenPopPop(pVCpu)
12239/** Raises a FPU stack underflow exception for an instruction pushing a result
12240 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12241#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12242 iemFpuStackPushUnderflow(pVCpu)
12243/** Raises a FPU stack underflow exception for an instruction pushing a result
12244 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12245#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12246 iemFpuStackPushUnderflowTwo(pVCpu)
12247
12248/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12249 * FPUIP, FPUCS and FOP. */
12250#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12251 iemFpuStackPushOverflow(pVCpu)
12252/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12253 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12254#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12255 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12256/** Prepares for using the FPU state.
12257 * Ensures that we can use the host FPU in the current context (RC+R0).
12258 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12259#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12260/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12261#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12262/** Actualizes the guest FPU state so it can be accessed and modified. */
12263#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12264
12265/** Prepares for using the SSE state.
12266 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12267 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12268#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12269/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12270#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12271/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12272#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12273
12274/** Prepares for using the AVX state.
12275 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12276 * Ensures the guest AVX state in the CPUMCTX is up to date.
12277 * @note This will include the AVX512 state too when support for it is added
12278 * due to the zero-extending feature of VEX instructions. */
12279#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12280/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12281#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12282/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12283#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12284
12285/**
12286 * Calls a MMX assembly implementation taking two visible arguments.
12287 *
12288 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12289 * @param a0 The first extra argument.
12290 * @param a1 The second extra argument.
12291 */
12292#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12293 do { \
12294 IEM_MC_PREPARE_FPU_USAGE(); \
12295 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12296 } while (0)
12297
12298/**
12299 * Calls a MMX assembly implementation taking three visible arguments.
12300 *
12301 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12302 * @param a0 The first extra argument.
12303 * @param a1 The second extra argument.
12304 * @param a2 The third extra argument.
12305 */
12306#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12307 do { \
12308 IEM_MC_PREPARE_FPU_USAGE(); \
12309 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12310 } while (0)
12311
12312
12313/**
12314 * Calls a SSE assembly implementation taking two visible arguments.
12315 *
12316 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12317 * @param a0 The first extra argument.
12318 * @param a1 The second extra argument.
12319 */
12320#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12321 do { \
12322 IEM_MC_PREPARE_SSE_USAGE(); \
12323 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12324 } while (0)
12325
12326/**
12327 * Calls a SSE assembly implementation taking three visible arguments.
12328 *
12329 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12330 * @param a0 The first extra argument.
12331 * @param a1 The second extra argument.
12332 * @param a2 The third extra argument.
12333 */
12334#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12335 do { \
12336 IEM_MC_PREPARE_SSE_USAGE(); \
12337 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12338 } while (0)
12339
12340
12341/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12342 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12343#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12344 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12345
12346/**
12347 * Calls an AVX assembly implementation taking two visible arguments.
12348 *
12349 * There is one implicit zeroth argument, a pointer to the extended state.
12350 *
12351 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12352 * @param a1 The first extra argument.
12353 * @param a2 The second extra argument.
12354 */
12355#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12356 do { \
12357 IEM_MC_PREPARE_AVX_USAGE(); \
12358 a_pfnAImpl(pXState, (a1), (a2)); \
12359 } while (0)
12360
12361/**
12362 * Calls an AVX assembly implementation taking three visible arguments.
12363 *
12364 * There is one implicit zeroth argument, a pointer to the extended state.
12365 *
12366 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12367 * @param a1 The first extra argument.
12368 * @param a2 The second extra argument.
12369 * @param a3 The third extra argument.
12370 */
12371#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12372 do { \
12373 IEM_MC_PREPARE_AVX_USAGE(); \
12374 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12375 } while (0)
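/*
 * Usage sketch (illustrative only): the implicit-argument helper is declared
 * first, followed by the visible arguments, before calling the AVX worker.
 * The worker name, the exception-check macro placement and the register
 * indices are assumptions for the sake of the example.
 *
 * @code
 *      IEM_MC_BEGIN(4, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT128U,  puDst,  1);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc1, 2);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc2, 3);
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *      IEM_MC_PREPARE_AVX_USAGE();
 *      IEM_MC_REF_XREG_U128(puDst, 0);        // illustrative register indices
 *      IEM_MC_REF_XREG_U128_CONST(puSrc1, 1);
 *      IEM_MC_REF_XREG_U128_CONST(puSrc2, 2);
 *      IEM_MC_CALL_AVX_AIMPL_3(iemAImpl_vpand_u128, puDst, puSrc1, puSrc2); // assumed worker
 *      IEM_MC_CLEAR_YREG_128_UP(0);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */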
12376
12377/** @note Not for IOPL or IF testing. */
12378#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12379/** @note Not for IOPL or IF testing. */
12380#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12381/** @note Not for IOPL or IF testing. */
12382#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12385/** @note Not for IOPL or IF testing. */
12386#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12387 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12388 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12389/** @note Not for IOPL or IF testing. */
12390#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12391 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12392 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12393/** @note Not for IOPL or IF testing. */
12394#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12395 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12396 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12397 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12398/** @note Not for IOPL or IF testing. */
12399#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12400 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12401 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12402 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12403#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12404#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12405#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12406/** @note Not for IOPL or IF testing. */
12407#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12408 if ( pVCpu->cpum.GstCtx.cx != 0 \
12409 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12410/** @note Not for IOPL or IF testing. */
12411#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12412 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12413 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12416 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12417 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12420 if ( pVCpu->cpum.GstCtx.cx != 0 \
12421 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12424 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12425 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12428 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12429 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12430#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12431#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12432
12433#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12434 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12435#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12436 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12437#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12438 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12439#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12440 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12441#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12442 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12443#define IEM_MC_IF_FCW_IM() \
12444 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12445
12446#define IEM_MC_ELSE() } else {
12447#define IEM_MC_ENDIF() } do {} while (0)
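/*
 * Usage sketch (illustrative only): a conditional short jump tests an EFLAGS
 * bit with the IF/ELSE/ENDIF microcode; the decoder function name below is
 * hypothetical.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_jz_Jb)
 *      {
 *          int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *
 *          IEM_MC_BEGIN(0, 0);
 *          IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *              IEM_MC_REL_JMP_S8(i8Imm);
 *          } IEM_MC_ELSE() {
 *              IEM_MC_ADVANCE_RIP();
 *          } IEM_MC_ENDIF();
 *          IEM_MC_END();
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */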
12448
12449/** @} */
12450
12451
12452/** @name Opcode Debug Helpers.
12453 * @{
12454 */
12455#ifdef VBOX_WITH_STATISTICS
12456# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12457#else
12458# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12459#endif
12460
12461#ifdef DEBUG
12462# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12463 do { \
12464 IEMOP_INC_STATS(a_Stats); \
12465 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12466 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12467 } while (0)
12468
12469# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12470 do { \
12471 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12472 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12473 (void)RT_CONCAT(OP_,a_Upper); \
12474 (void)(a_fDisHints); \
12475 (void)(a_fIemHints); \
12476 } while (0)
12477
12478# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12479 do { \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12481 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12482 (void)RT_CONCAT(OP_,a_Upper); \
12483 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12484 (void)(a_fDisHints); \
12485 (void)(a_fIemHints); \
12486 } while (0)
12487
12488# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12489 do { \
12490 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12491 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12492 (void)RT_CONCAT(OP_,a_Upper); \
12493 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12494 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12495 (void)(a_fDisHints); \
12496 (void)(a_fIemHints); \
12497 } while (0)
12498
12499# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12500 do { \
12501 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12502 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12503 (void)RT_CONCAT(OP_,a_Upper); \
12504 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12505 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12506 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12507 (void)(a_fDisHints); \
12508 (void)(a_fIemHints); \
12509 } while (0)
12510
12511# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12512 do { \
12513 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12514 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12515 (void)RT_CONCAT(OP_,a_Upper); \
12516 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12517 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12518 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12519 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12520 (void)(a_fDisHints); \
12521 (void)(a_fIemHints); \
12522 } while (0)
12523
12524#else
12525# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12526
12527# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12528 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12529# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12530 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12531# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12532 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12533# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12534 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12535# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12536 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12537
12538#endif
12539
12540#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12541 IEMOP_MNEMONIC0EX(a_Lower, \
12542 #a_Lower, \
12543 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12544#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12545 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12546 #a_Lower " " #a_Op1, \
12547 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12548#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12549 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12550 #a_Lower " " #a_Op1 "," #a_Op2, \
12551 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12552#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12553 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12554 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12555 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12556#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12557 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12558 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12559 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
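/*
 * Usage sketch (illustrative only): a two-operand instruction typically opens
 * its decoder with one of these wrappers, e.g.
 *
 * @code
 *      IEMOP_MNEMONIC2(RM, XOR, xor, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 * @endcode
 *
 * which bumps the per-instruction statistics counter and, in debug builds,
 * emits the Log4 decode line together with compile-time references that check
 * the form and operand constants exist.
 */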
12560
12561/** @} */
12562
12563
12564/** @name Opcode Helpers.
12565 * @{
12566 */
12567
12568#ifdef IN_RING3
12569# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12570 do { \
12571 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12572 else \
12573 { \
12574 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12575 return IEMOP_RAISE_INVALID_OPCODE(); \
12576 } \
12577 } while (0)
12578#else
12579# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12580 do { \
12581 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12582 else return IEMOP_RAISE_INVALID_OPCODE(); \
12583 } while (0)
12584#endif
12585
12586/** The instruction requires a 186 or later. */
12587#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12588# define IEMOP_HLP_MIN_186() do { } while (0)
12589#else
12590# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12591#endif
12592
12593/** The instruction requires a 286 or later. */
12594#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12595# define IEMOP_HLP_MIN_286() do { } while (0)
12596#else
12597# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12598#endif
12599
12600/** The instruction requires a 386 or later. */
12601#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12602# define IEMOP_HLP_MIN_386() do { } while (0)
12603#else
12604# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12605#endif
12606
12607/** The instruction requires a 386 or later if the given expression is true. */
12608#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12609# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12610#else
12611# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12612#endif
12613
12614/** The instruction requires a 486 or later. */
12615#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12616# define IEMOP_HLP_MIN_486() do { } while (0)
12617#else
12618# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12619#endif
12620
12621/** The instruction requires a Pentium (586) or later. */
12622#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12623# define IEMOP_HLP_MIN_586() do { } while (0)
12624#else
12625# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12626#endif
12627
12628/** The instruction requires a PentiumPro (686) or later. */
12629#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12630# define IEMOP_HLP_MIN_686() do { } while (0)
12631#else
12632# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12633#endif
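/*
 * Usage sketch (illustrative only): an instruction introduced with the 80386
 * guards its decoder like this:
 *
 * @code
 *      IEMOP_HLP_MIN_386();
 * @endcode
 *
 * When the build-time IEM_CFG_TARGET_CPU already guarantees the minimum the
 * macro compiles to nothing; otherwise it checks the runtime target CPU and
 * raises \#UD (after a DBGFSTOP point in ring-3 builds) if it is too old.
 */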
12634
12635
12636/** The instruction raises an \#UD in real and V8086 mode. */
12637#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12638 do \
12639 { \
12640 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12641 else return IEMOP_RAISE_INVALID_OPCODE(); \
12642 } while (0)
12643
12644#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12645/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12646 * without a 64-bit code segment (applicable to all VMX instructions except
12647 * VMCALL).
12648 */
12649#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12650 do \
12651 { \
12652 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12653 && ( !IEM_IS_LONG_MODE(pVCpu) \
12654 || IEM_IS_64BIT_CODE(pVCpu))) \
12655 { /* likely */ } \
12656 else \
12657 { \
12658 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12659 { \
12660 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12661 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12662 return IEMOP_RAISE_INVALID_OPCODE(); \
12663 } \
12664 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12665 { \
12666 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12667 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12668 return IEMOP_RAISE_INVALID_OPCODE(); \
12669 } \
12670 } \
12671 } while (0)
12672
12673/** The instruction can only be executed in VMX operation (VMX root mode and
12674 * non-root mode).
12675 *
12676 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12677 */
12678# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12679 do \
12680 { \
12681 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12682 else \
12683 { \
12684 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12685 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12686 return IEMOP_RAISE_INVALID_OPCODE(); \
12687 } \
12688 } while (0)
12689#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12690
12691/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12692 * 64-bit mode. */
12693#define IEMOP_HLP_NO_64BIT() \
12694 do \
12695 { \
12696 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12697 return IEMOP_RAISE_INVALID_OPCODE(); \
12698 } while (0)
12699
12700/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12701 * 64-bit mode. */
12702#define IEMOP_HLP_ONLY_64BIT() \
12703 do \
12704 { \
12705 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12706 return IEMOP_RAISE_INVALID_OPCODE(); \
12707 } while (0)
12708
12709/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12710#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12711 do \
12712 { \
12713 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12714 iemRecalEffOpSize64Default(pVCpu); \
12715 } while (0)
12716
12717/** The instruction has 64-bit operand size in 64-bit mode. */
12718#define IEMOP_HLP_64BIT_OP_SIZE() \
12719 do \
12720 { \
12721 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12722 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12723 } while (0)
12724
12725/** Only a REX prefix immediately preceding the first opcode byte takes
12726 * effect. This macro helps ensure this as well as logging bad guest code. */
12727#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12728 do \
12729 { \
12730 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12731 { \
12732 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12733 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12734 pVCpu->iem.s.uRexB = 0; \
12735 pVCpu->iem.s.uRexIndex = 0; \
12736 pVCpu->iem.s.uRexReg = 0; \
12737 iemRecalEffOpSize(pVCpu); \
12738 } \
12739 } while (0)
12740
12741/**
12742 * Done decoding.
12743 */
12744#define IEMOP_HLP_DONE_DECODING() \
12745 do \
12746 { \
12747 /*nothing for now, maybe later... */ \
12748 } while (0)
12749
12750/**
12751 * Done decoding, raise \#UD exception if lock prefix present.
12752 */
12753#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12754 do \
12755 { \
12756 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12757 { /* likely */ } \
12758 else \
12759 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12760 } while (0)
12761
12762
12763/**
12764 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12765 * repnz or size prefixes are present, or if in real or v8086 mode.
12766 */
12767#define IEMOP_HLP_DONE_VEX_DECODING() \
12768 do \
12769 { \
12770 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12771 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12772 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12773 { /* likely */ } \
12774 else \
12775 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12776 } while (0)
12777
12778/**
12779 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12780 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12781 */
12782#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12783 do \
12784 { \
12785 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12786 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12787 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12788 && pVCpu->iem.s.uVexLength == 0)) \
12789 { /* likely */ } \
12790 else \
12791 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12792 } while (0)
12793
12794
12795/**
12796 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12797 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12798 * register 0, or if in real or v8086 mode.
12799 */
12800#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12801 do \
12802 { \
12803 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12804 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12805 && !pVCpu->iem.s.uVex3rdReg \
12806 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12807 { /* likely */ } \
12808 else \
12809 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12810 } while (0)
12811
12812/**
12813 * Done decoding VEX, no V, L=0.
12814 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12815 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12816 */
12817#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12818 do \
12819 { \
12820 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12821 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12822 && pVCpu->iem.s.uVexLength == 0 \
12823 && pVCpu->iem.s.uVex3rdReg == 0 \
12824 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12825 { /* likely */ } \
12826 else \
12827 return IEMOP_RAISE_INVALID_OPCODE(); \
12828 } while (0)
12829
12830#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12831 do \
12832 { \
12833 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12834 { /* likely */ } \
12835 else \
12836 { \
12837 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12838 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12839 } \
12840 } while (0)
12841#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12842 do \
12843 { \
12844 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12845 { /* likely */ } \
12846 else \
12847 { \
12848 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12849 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12850 } \
12851 } while (0)
12852
12853/**
12854 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12855 * are present.
12856 */
12857#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12858 do \
12859 { \
12860 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12861 { /* likely */ } \
12862 else \
12863 return IEMOP_RAISE_INVALID_OPCODE(); \
12864 } while (0)
12865
12866/**
12867 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12868 * prefixes are present.
12869 */
12870#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12871 do \
12872 { \
12873 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12874 { /* likely */ } \
12875 else \
12876 return IEMOP_RAISE_INVALID_OPCODE(); \
12877 } while (0)
12878
12879
12880/**
12881 * Calculates the effective address of a ModR/M memory operand.
12882 *
12883 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12884 *
12885 * @return Strict VBox status code.
12886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12887 * @param bRm The ModRM byte.
12888 * @param cbImm The size of any immediate following the
12889 * effective address opcode bytes. Important for
12890 * RIP relative addressing.
12891 * @param pGCPtrEff Where to return the effective address.
12892 */
12893IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12894{
12895 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12896# define SET_SS_DEF() \
12897 do \
12898 { \
12899 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12900 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12901 } while (0)
12902
12903 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12904 {
12905/** @todo Check the effective address size crap! */
12906 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12907 {
12908 uint16_t u16EffAddr;
12909
12910 /* Handle the disp16 form with no registers first. */
12911 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12912 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12913 else
12914 {
12915                /* Get the displacement. */
12916 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12917 {
12918 case 0: u16EffAddr = 0; break;
12919 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12920 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12921 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12922 }
12923
12924 /* Add the base and index registers to the disp. */
12925 switch (bRm & X86_MODRM_RM_MASK)
12926 {
12927 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12928 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12929 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12930 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12931 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12932 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12933 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12934 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12935 }
12936 }
12937
12938 *pGCPtrEff = u16EffAddr;
12939 }
12940 else
12941 {
12942 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12943 uint32_t u32EffAddr;
12944
12945 /* Handle the disp32 form with no registers first. */
12946 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12947 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12948 else
12949 {
12950 /* Get the register (or SIB) value. */
12951 switch ((bRm & X86_MODRM_RM_MASK))
12952 {
12953 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12954 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12955 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12956 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12957 case 4: /* SIB */
12958 {
12959 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12960
12961 /* Get the index and scale it. */
12962 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12963 {
12964 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12965 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12966 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12967 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12968 case 4: u32EffAddr = 0; /*none */ break;
12969 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12970 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12971 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12972 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12973 }
12974 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12975
12976 /* add base */
12977 switch (bSib & X86_SIB_BASE_MASK)
12978 {
12979 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12980 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12981 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12982 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12983 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12984 case 5:
12985 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12986 {
12987 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12988 SET_SS_DEF();
12989 }
12990 else
12991 {
12992 uint32_t u32Disp;
12993 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12994 u32EffAddr += u32Disp;
12995 }
12996 break;
12997 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12998 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12999 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13000 }
13001 break;
13002 }
13003 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13004 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13005 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13006 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13007 }
13008
13009 /* Get and add the displacement. */
13010 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13011 {
13012 case 0:
13013 break;
13014 case 1:
13015 {
13016 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13017 u32EffAddr += i8Disp;
13018 break;
13019 }
13020 case 2:
13021 {
13022 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13023 u32EffAddr += u32Disp;
13024 break;
13025 }
13026 default:
13027 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13028 }
13029
13030 }
13031 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13032 *pGCPtrEff = u32EffAddr;
13033 else
13034 {
13035 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13036 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13037 }
13038 }
13039 }
13040 else
13041 {
13042 uint64_t u64EffAddr;
13043
13044 /* Handle the rip+disp32 form with no registers first. */
13045 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13046 {
13047 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13048 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13049 }
13050 else
13051 {
13052 /* Get the register (or SIB) value. */
13053 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13054 {
13055 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13056 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13057 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13058 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13059 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13060 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13061 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13062 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13063 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13064 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13065 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13066 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13067 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13068 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13069 /* SIB */
13070 case 4:
13071 case 12:
13072 {
13073 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13074
13075 /* Get the index and scale it. */
13076 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13077 {
13078 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13079 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13080 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13081 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13082 case 4: u64EffAddr = 0; /*none */ break;
13083 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13084 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13085 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13086 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13087 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13088 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13089 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13090 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13091 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13092 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13093 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13094 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13095 }
13096 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13097
13098 /* add base */
13099 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13100 {
13101 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13102 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13103 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13104 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13105 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13106 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13107 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13108 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13109 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13110 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13111 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13112 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13113 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13114 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13115 /* complicated encodings */
13116 case 5:
13117 case 13:
13118 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13119 {
13120 if (!pVCpu->iem.s.uRexB)
13121 {
13122 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13123 SET_SS_DEF();
13124 }
13125 else
13126 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13127 }
13128 else
13129 {
13130 uint32_t u32Disp;
13131 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13132 u64EffAddr += (int32_t)u32Disp;
13133 }
13134 break;
13135 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13136 }
13137 break;
13138 }
13139 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13140 }
13141
13142 /* Get and add the displacement. */
13143 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13144 {
13145 case 0:
13146 break;
13147 case 1:
13148 {
13149 int8_t i8Disp;
13150 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13151 u64EffAddr += i8Disp;
13152 break;
13153 }
13154 case 2:
13155 {
13156 uint32_t u32Disp;
13157 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13158 u64EffAddr += (int32_t)u32Disp;
13159 break;
13160 }
13161 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13162 }
13163
13164 }
13165
13166 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13167 *pGCPtrEff = u64EffAddr;
13168 else
13169 {
13170 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13171 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13172 }
13173 }
13174
13175 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13176 return VINF_SUCCESS;
13177}
13178
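/*
 * Illustrative sketch (not part of the build): how the mod/reg/rm and SIB
 * decoding above comes together for one concrete 64-bit operand.  The helper
 * name and the instruction bytes below are examples only.
 *
 * Bytes 48 8B 44 8B 10 encode 'mov rax, [rbx + rcx*4 + 0x10]':
 *   ModRM 0x44: mod=01 (disp8 follows), reg=000 (rax), rm=100 (SIB follows)
 *   SIB   0x8B: scale=10 (*4), index=001 (rcx), base=011 (rbx)
 *   disp8 0x10
 */
#if 0
static uint64_t iemExampleCalcSibEffAddr(uint8_t bSib, int8_t i8Disp, uint64_t uIndexReg, uint64_t uBaseReg)
{
    /* Simplified: does not special case index=100 ("none") the way the code above does. */
    uint64_t uEffAddr = uIndexReg << ((bSib >> 6) & 3);     /* index * 2^scale */
    uEffAddr         += uBaseReg;                           /* + base */
    uEffAddr         += (int64_t)i8Disp;                    /* + sign-extended disp8 */
    return uEffAddr;                                        /* = rbx + rcx*4 + 0x10 for the bytes above */
}
#endif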
13179
13180/**
13181 * Calculates the effective address of a ModR/M memory operand.
13182 *
13183 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13184 *
13185 * @return Strict VBox status code.
13186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13187 * @param bRm The ModRM byte.
13188 * @param cbImm The size of any immediate following the
13189 * effective address opcode bytes. Important for
13190 * RIP relative addressing.
13191 * @param pGCPtrEff Where to return the effective address.
13192 * @param offRsp The displacement to add to xSP when it is used as the SIB base register.
13193 */
13194IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13195{
13196 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13197# define SET_SS_DEF() \
13198 do \
13199 { \
13200 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13201 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13202 } while (0)
13203
13204 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13205 {
13206/** @todo Check the effective address size crap! */
13207 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13208 {
13209 uint16_t u16EffAddr;
13210
13211 /* Handle the disp16 form with no registers first. */
13212 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13213 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13214 else
13215 {
13216 /* Get the displacement. */
13217 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13218 {
13219 case 0: u16EffAddr = 0; break;
13220 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13221 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13222 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13223 }
13224
13225 /* Add the base and index registers to the disp. */
13226 switch (bRm & X86_MODRM_RM_MASK)
13227 {
13228 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13229 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13230 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13231 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13232 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13233 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13234 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13235 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13236 }
13237 }
13238
13239 *pGCPtrEff = u16EffAddr;
13240 }
13241 else
13242 {
13243 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13244 uint32_t u32EffAddr;
13245
13246 /* Handle the disp32 form with no registers first. */
13247 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13248 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13249 else
13250 {
13251 /* Get the register (or SIB) value. */
13252 switch ((bRm & X86_MODRM_RM_MASK))
13253 {
13254 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13255 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13256 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13257 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13258 case 4: /* SIB */
13259 {
13260 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13261
13262 /* Get the index and scale it. */
13263 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13264 {
13265 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13266 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13267 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13268 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13269 case 4: u32EffAddr = 0; /*none */ break;
13270 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13271 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13272 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13273 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13274 }
13275 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13276
13277 /* add base */
13278 switch (bSib & X86_SIB_BASE_MASK)
13279 {
13280 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13281 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13282 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13283 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13284 case 4:
13285 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13286 SET_SS_DEF();
13287 break;
13288 case 5:
13289 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13290 {
13291 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13292 SET_SS_DEF();
13293 }
13294 else
13295 {
13296 uint32_t u32Disp;
13297 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13298 u32EffAddr += u32Disp;
13299 }
13300 break;
13301 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13302 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13303 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13304 }
13305 break;
13306 }
13307 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13308 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13309 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13310 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13311 }
13312
13313 /* Get and add the displacement. */
13314 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13315 {
13316 case 0:
13317 break;
13318 case 1:
13319 {
13320 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13321 u32EffAddr += i8Disp;
13322 break;
13323 }
13324 case 2:
13325 {
13326 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13327 u32EffAddr += u32Disp;
13328 break;
13329 }
13330 default:
13331 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13332 }
13333
13334 }
13335 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13336 *pGCPtrEff = u32EffAddr;
13337 else
13338 {
13339 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13340 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13341 }
13342 }
13343 }
13344 else
13345 {
13346 uint64_t u64EffAddr;
13347
13348 /* Handle the rip+disp32 form with no registers first. */
13349 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13350 {
13351 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13352 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13353 }
13354 else
13355 {
13356 /* Get the register (or SIB) value. */
13357 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13358 {
13359 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13360 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13361 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13362 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13363 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13364 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13365 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13366 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13367 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13368 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13369 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13370 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13371 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13372 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13373 /* SIB */
13374 case 4:
13375 case 12:
13376 {
13377 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13378
13379 /* Get the index and scale it. */
13380 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13381 {
13382 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13383 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13384 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13385 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13386 case 4: u64EffAddr = 0; /*none */ break;
13387 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13388 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13389 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13390 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13391 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13392 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13393 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13394 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13395 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13396 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13397 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13399 }
13400 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13401
13402 /* add base */
13403 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13404 {
13405 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13406 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13407 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13408 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13409 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13410 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13411 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13412 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13413 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13414 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13415 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13416 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13417 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13418 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13419 /* complicated encodings */
13420 case 5:
13421 case 13:
13422 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13423 {
13424 if (!pVCpu->iem.s.uRexB)
13425 {
13426 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13427 SET_SS_DEF();
13428 }
13429 else
13430 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13431 }
13432 else
13433 {
13434 uint32_t u32Disp;
13435 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13436 u64EffAddr += (int32_t)u32Disp;
13437 }
13438 break;
13439 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13440 }
13441 break;
13442 }
13443 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13444 }
13445
13446 /* Get and add the displacement. */
13447 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13448 {
13449 case 0:
13450 break;
13451 case 1:
13452 {
13453 int8_t i8Disp;
13454 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13455 u64EffAddr += i8Disp;
13456 break;
13457 }
13458 case 2:
13459 {
13460 uint32_t u32Disp;
13461 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13462 u64EffAddr += (int32_t)u32Disp;
13463 break;
13464 }
13465 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13466 }
13467
13468 }
13469
13470 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13471 *pGCPtrEff = u64EffAddr;
13472 else
13473 {
13474 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13475 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13476 }
13477 }
13478
13479 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13480 return VINF_SUCCESS;
13481}
13482
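/*
 * Note on the Ex variant: the only difference from iemOpHlpCalcRmEffAddr is the
 * offRsp bias, which is added whenever xSP is the SIB base register (the case 4
 * branches above).  Worked example: with esp=0x1000, offRsp=-8 and an [esp+4]
 * operand (ModRM 0x44, SIB 0x24, disp8 0x04) the result is 0x1000 - 8 + 4 = 0xffc.
 */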
13483
13484#ifdef IEM_WITH_SETJMP
13485/**
13486 * Calculates the effective address of a ModR/M memory operand.
13487 *
13488 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13489 *
13490 * May longjmp on internal error.
13491 *
13492 * @return The effective address.
13493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13494 * @param bRm The ModRM byte.
13495 * @param cbImm The size of any immediate following the
13496 * effective address opcode bytes. Important for
13497 * RIP relative addressing.
13498 */
13499IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13500{
13501 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13502# define SET_SS_DEF() \
13503 do \
13504 { \
13505 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13506 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13507 } while (0)
13508
13509 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13510 {
13511/** @todo Check the effective address size crap! */
13512 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13513 {
13514 uint16_t u16EffAddr;
13515
13516 /* Handle the disp16 form with no registers first. */
13517 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13518 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13519 else
13520 {
13521 /* Get the displacement. */
13522 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13523 {
13524 case 0: u16EffAddr = 0; break;
13525 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13526 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13527 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13528 }
13529
13530 /* Add the base and index registers to the disp. */
13531 switch (bRm & X86_MODRM_RM_MASK)
13532 {
13533 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13534 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13535 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13536 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13537 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13538 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13539 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13540 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13541 }
13542 }
13543
13544 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13545 return u16EffAddr;
13546 }
13547
13548 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13549 uint32_t u32EffAddr;
13550
13551 /* Handle the disp32 form with no registers first. */
13552 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13553 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13554 else
13555 {
13556 /* Get the register (or SIB) value. */
13557 switch ((bRm & X86_MODRM_RM_MASK))
13558 {
13559 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13560 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13561 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13562 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13563 case 4: /* SIB */
13564 {
13565 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13566
13567 /* Get the index and scale it. */
13568 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13569 {
13570 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13571 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13572 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13573 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13574 case 4: u32EffAddr = 0; /*none */ break;
13575 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13576 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13577 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13578 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13579 }
13580 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13581
13582 /* add base */
13583 switch (bSib & X86_SIB_BASE_MASK)
13584 {
13585 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13586 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13587 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13588 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13589 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13590 case 5:
13591 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13592 {
13593 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13594 SET_SS_DEF();
13595 }
13596 else
13597 {
13598 uint32_t u32Disp;
13599 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13600 u32EffAddr += u32Disp;
13601 }
13602 break;
13603 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13604 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13605 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13606 }
13607 break;
13608 }
13609 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13610 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13611 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13612 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13613 }
13614
13615 /* Get and add the displacement. */
13616 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13617 {
13618 case 0:
13619 break;
13620 case 1:
13621 {
13622 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13623 u32EffAddr += i8Disp;
13624 break;
13625 }
13626 case 2:
13627 {
13628 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13629 u32EffAddr += u32Disp;
13630 break;
13631 }
13632 default:
13633 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13634 }
13635 }
13636
13637 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13638 {
13639 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13640 return u32EffAddr;
13641 }
13642 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13643 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13644 return u32EffAddr & UINT16_MAX;
13645 }
13646
13647 uint64_t u64EffAddr;
13648
13649 /* Handle the rip+disp32 form with no registers first. */
13650 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13651 {
13652 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13653 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13654 }
13655 else
13656 {
13657 /* Get the register (or SIB) value. */
13658 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13659 {
13660 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13661 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13662 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13663 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13664 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13665 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13666 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13667 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13668 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13669 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13670 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13671 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13672 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13673 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13674 /* SIB */
13675 case 4:
13676 case 12:
13677 {
13678 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13679
13680 /* Get the index and scale it. */
13681 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13682 {
13683 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13684 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13685 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13686 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13687 case 4: u64EffAddr = 0; /*none */ break;
13688 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13689 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13690 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13691 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13692 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13693 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13694 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13695 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13696 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13697 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13698 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13699 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13700 }
13701 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13702
13703 /* add base */
13704 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13705 {
13706 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13707 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13708 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13709 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13710 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13711 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13712 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13713 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13714 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13715 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13716 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13717 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13718 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13719 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13720 /* complicated encodings */
13721 case 5:
13722 case 13:
13723 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13724 {
13725 if (!pVCpu->iem.s.uRexB)
13726 {
13727 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13728 SET_SS_DEF();
13729 }
13730 else
13731 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13732 }
13733 else
13734 {
13735 uint32_t u32Disp;
13736 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13737 u64EffAddr += (int32_t)u32Disp;
13738 }
13739 break;
13740 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13741 }
13742 break;
13743 }
13744 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13745 }
13746
13747 /* Get and add the displacement. */
13748 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13749 {
13750 case 0:
13751 break;
13752 case 1:
13753 {
13754 int8_t i8Disp;
13755 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13756 u64EffAddr += i8Disp;
13757 break;
13758 }
13759 case 2:
13760 {
13761 uint32_t u32Disp;
13762 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13763 u64EffAddr += (int32_t)u32Disp;
13764 break;
13765 }
13766 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13767 }
13768
13769 }
13770
13771 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13772 {
13773 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13774 return u64EffAddr;
13775 }
13776 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13777 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13778 return u64EffAddr & UINT32_MAX;
13779}
13780#endif /* IEM_WITH_SETJMP */
13781
13782/** @} */
13783
13784
13785
13786/*
13787 * Include the instructions
13788 */
13789#include "IEMAllInstructions.cpp.h"
13790
13791
13792
13793#ifdef LOG_ENABLED
13794/**
13795 * Logs the current instruction.
13796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13797 * @param fSameCtx Set if we have the same context information as the VMM,
13798 * clear if we may have already executed an instruction in
13799 * our debug context. When clear, we assume IEMCPU holds
13800 * valid CPU mode info.
13801 *
13802 * The @a fSameCtx parameter is now misleading and obsolete.
13803 * @param pszFunction The IEM function doing the execution.
13804 */
13805IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13806{
13807# ifdef IN_RING3
13808 if (LogIs2Enabled())
13809 {
13810 char szInstr[256];
13811 uint32_t cbInstr = 0;
13812 if (fSameCtx)
13813 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13814 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13815 szInstr, sizeof(szInstr), &cbInstr);
13816 else
13817 {
13818 uint32_t fFlags = 0;
13819 switch (pVCpu->iem.s.enmCpuMode)
13820 {
13821 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13822 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13823 case IEMMODE_16BIT:
13824 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13825 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13826 else
13827 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13828 break;
13829 }
13830 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13831 szInstr, sizeof(szInstr), &cbInstr);
13832 }
13833
13834 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13835 Log2(("**** %s\n"
13836 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13837 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13838 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13839 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13840 " %s\n"
13841 , pszFunction,
13842 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13843 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13844 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13845 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13846 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13847 szInstr));
13848
13849 if (LogIs3Enabled())
13850 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13851 }
13852 else
13853# endif
13854 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13855 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13856 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13857}
13858#endif /* LOG_ENABLED */
13859
13860
13861/**
13862 * Makes status code adjustments (pass up from I/O and access handlers)
13863 * and maintains statistics.
13864 *
13865 * @returns Strict VBox status code to pass up.
13866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13867 * @param rcStrict The status from executing an instruction.
13868 */
13869DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13870{
13871 if (rcStrict != VINF_SUCCESS)
13872 {
13873 if (RT_SUCCESS(rcStrict))
13874 {
13875 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13876 || rcStrict == VINF_IOM_R3_IOPORT_READ
13877 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13878 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13879 || rcStrict == VINF_IOM_R3_MMIO_READ
13880 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13881 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13882 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13883 || rcStrict == VINF_CPUM_R3_MSR_READ
13884 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13885 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13886 || rcStrict == VINF_EM_RAW_TO_R3
13887 || rcStrict == VINF_EM_TRIPLE_FAULT
13888 || rcStrict == VINF_GIM_R3_HYPERCALL
13889 /* raw-mode / virt handlers only: */
13890 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13891 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13892 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13893 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13894 || rcStrict == VINF_SELM_SYNC_GDT
13895 || rcStrict == VINF_CSAM_PENDING_ACTION
13896 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13897 /* nested hw.virt codes: */
13898 || rcStrict == VINF_VMX_VMEXIT
13899 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13900 || rcStrict == VINF_SVM_VMEXIT
13901 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13902/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13903 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13904#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13905 if ( rcStrict == VINF_VMX_VMEXIT
13906 && rcPassUp == VINF_SUCCESS)
13907 rcStrict = VINF_SUCCESS;
13908 else
13909#endif
13910#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13911 if ( rcStrict == VINF_SVM_VMEXIT
13912 && rcPassUp == VINF_SUCCESS)
13913 rcStrict = VINF_SUCCESS;
13914 else
13915#endif
13916 if (rcPassUp == VINF_SUCCESS)
13917 pVCpu->iem.s.cRetInfStatuses++;
13918 else if ( rcPassUp < VINF_EM_FIRST
13919 || rcPassUp > VINF_EM_LAST
13920 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13921 {
13922 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13923 pVCpu->iem.s.cRetPassUpStatus++;
13924 rcStrict = rcPassUp;
13925 }
13926 else
13927 {
13928 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13929 pVCpu->iem.s.cRetInfStatuses++;
13930 }
13931 }
13932 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13933 pVCpu->iem.s.cRetAspectNotImplemented++;
13934 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13935 pVCpu->iem.s.cRetInstrNotImplemented++;
13936 else
13937 pVCpu->iem.s.cRetErrStatuses++;
13938 }
13939 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13940 {
13941 pVCpu->iem.s.cRetPassUpStatus++;
13942 rcStrict = pVCpu->iem.s.rcPassUp;
13943 }
13944
13945 return rcStrict;
13946}
13947
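/*
 * Behavioural sketch of the pass-up merging done above (ignoring the nested
 * hw.virt special cases and the statistics); illustration only, not built.
 * Lower VINF_EM_* values take precedence, and informational codes outside the
 * VINF_EM range always replace the instruction status.
 */
#if 0
static int iemExampleMergePassUp(int rcInstr /* informational, != VINF_SUCCESS */, int rcPassUp)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcInstr;
    if (   rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcInstr)
        return rcPassUp;
    return rcInstr;
}
#endif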
13948
13949/**
13950 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13951 * IEMExecOneWithPrefetchedByPC.
13952 *
13953 * Similar code is found in IEMExecLots.
13954 *
13955 * @return Strict VBox status code.
13956 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13957 * @param fExecuteInhibit If set, execute the instruction following STI,
13958 * POP SS and MOV SS,GR.
13959 * @param pszFunction The calling function name.
13960 */
13961DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13962{
13963 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13964 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13965 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13966 RT_NOREF_PV(pszFunction);
13967
13968#ifdef IEM_WITH_SETJMP
13969 VBOXSTRICTRC rcStrict;
13970 jmp_buf JmpBuf;
13971 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13972 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13973 if ((rcStrict = setjmp(JmpBuf)) == 0)
13974 {
13975 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13976 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13977 }
13978 else
13979 pVCpu->iem.s.cLongJumps++;
13980 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13981#else
13982 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13983 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13984#endif
13985 if (rcStrict == VINF_SUCCESS)
13986 pVCpu->iem.s.cInstructions++;
13987 if (pVCpu->iem.s.cActiveMappings > 0)
13988 {
13989 Assert(rcStrict != VINF_SUCCESS);
13990 iemMemRollback(pVCpu);
13991 }
13992 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13993 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13994 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13995
13996//#ifdef DEBUG
13997// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13998//#endif
13999
14000 /* Execute the next instruction as well if an sti, pop ss or
14001 mov ss, Gr has just completed successfully. */
14002 if ( fExecuteInhibit
14003 && rcStrict == VINF_SUCCESS
14004 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14005 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14006 {
14007 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14008 if (rcStrict == VINF_SUCCESS)
14009 {
14010#ifdef LOG_ENABLED
14011 iemLogCurInstr(pVCpu, false, pszFunction);
14012#endif
14013#ifdef IEM_WITH_SETJMP
14014 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14015 if ((rcStrict = setjmp(JmpBuf)) == 0)
14016 {
14017 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14018 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14019 }
14020 else
14021 pVCpu->iem.s.cLongJumps++;
14022 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14023#else
14024 IEM_OPCODE_GET_NEXT_U8(&b);
14025 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14026#endif
14027 if (rcStrict == VINF_SUCCESS)
14028 pVCpu->iem.s.cInstructions++;
14029 if (pVCpu->iem.s.cActiveMappings > 0)
14030 {
14031 Assert(rcStrict != VINF_SUCCESS);
14032 iemMemRollback(pVCpu);
14033 }
14034 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14035 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14036 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14037 }
14038 else if (pVCpu->iem.s.cActiveMappings > 0)
14039 iemMemRollback(pVCpu);
14040 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14041 }
14042
14043 /*
14044 * Return value fiddling, statistics and sanity assertions.
14045 */
14046 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14047
14048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14050 return rcStrict;
14051}
14052
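/*
 * Guest code example (not IEM code) of why the fExecuteInhibit handling above
 * matters.  The instruction after a stack-selector load must not be interrupted:
 *
 *      mov  ss, ax     ; inhibits interrupts/#DB for exactly one instruction
 *      mov  sp, bx     ; must run before any interrupt is delivered, otherwise
 *                      ; the handler would run on a half-switched SS:SP
 *
 * iemExecOneInner therefore immediately executes the second instruction when the
 * inhibit force-flag is pending for the current RIP.
 */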
14053
14054#ifdef IN_RC
14055/**
14056 * Re-enters raw-mode or ensures we return to ring-3.
14057 *
14058 * @returns rcStrict, maybe modified.
14059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14060 * @param rcStrict The status code returned by the interpreter.
14061 */
14062DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14063{
14064 if ( !pVCpu->iem.s.fInPatchCode
14065 && ( rcStrict == VINF_SUCCESS
14066 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14067 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14068 {
14069 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14070 CPUMRawEnter(pVCpu);
14071 else
14072 {
14073 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14074 rcStrict = VINF_EM_RESCHEDULE;
14075 }
14076 }
14077 return rcStrict;
14078}
14079#endif
14080
14081
14082/**
14083 * Execute one instruction.
14084 *
14085 * @return Strict VBox status code.
14086 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14087 */
14088VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14089{
14090#ifdef LOG_ENABLED
14091 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14092#endif
14093
14094 /*
14095 * Do the decoding and emulation.
14096 */
14097 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14098 if (rcStrict == VINF_SUCCESS)
14099 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14100 else if (pVCpu->iem.s.cActiveMappings > 0)
14101 iemMemRollback(pVCpu);
14102
14103#ifdef IN_RC
14104 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14105#endif
14106 if (rcStrict != VINF_SUCCESS)
14107 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14108 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14109 return rcStrict;
14110}
14111
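/*
 * Minimal caller sketch (EM-style), illustration only; not built.
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    { /* RIP and the rest of the guest state have already been advanced by IEM. */ }
    else
    { /* Informational statuses (VINF_IOM_R3_*, VINF_EM_*, ...) are handed back for the caller to process. */ }
#endif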
14112
14113VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14114{
14115 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14116
14117 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14118 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14119 if (rcStrict == VINF_SUCCESS)
14120 {
14121 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14122 if (pcbWritten)
14123 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14124 }
14125 else if (pVCpu->iem.s.cActiveMappings > 0)
14126 iemMemRollback(pVCpu);
14127
14128#ifdef IN_RC
14129 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14130#endif
14131 return rcStrict;
14132}
14133
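/*
 * Usage sketch for the Ex variant, illustration only; not built.  Note that
 * *pcbWritten is only updated when decoding succeeded, hence the zero init.
 */
#if 0
    uint32_t     cbWritten = 0;
    VBOXSTRICTRC rcStrict  = IEMExecOneEx(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)), &cbWritten);
    /* cbWritten now holds the number of guest bytes the emulated instruction wrote. */
#endif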
14134
14135VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14136 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14137{
14138 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14139
14140 VBOXSTRICTRC rcStrict;
14141 if ( cbOpcodeBytes
14142 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14143 {
14144 iemInitDecoder(pVCpu, false);
14145#ifdef IEM_WITH_CODE_TLB
14146 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14147 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14148 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14149 pVCpu->iem.s.offCurInstrStart = 0;
14150 pVCpu->iem.s.offInstrNextByte = 0;
14151#else
14152 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14153 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14154#endif
14155 rcStrict = VINF_SUCCESS;
14156 }
14157 else
14158 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14159 if (rcStrict == VINF_SUCCESS)
14160 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14161 else if (pVCpu->iem.s.cActiveMappings > 0)
14162 iemMemRollback(pVCpu);
14163
14164#ifdef IN_RC
14165 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14166#endif
14167 return rcStrict;
14168}
14169
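/*
 * Usage sketch, illustration only; not built.  GCPtrPC and abBytes stand in for
 * opcode bytes the caller has already fetched (e.g. from an exit record).  The
 * prefetched bytes are only used when GCPtrPC equals the current guest RIP;
 * otherwise the call falls back to a normal opcode prefetch.
 */
#if 0
    uint8_t const abBytes[] = { 0x90 }; /* a single NOP captured at GCPtrPC */
    VBOXSTRICTRC  rcStrict  = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                                           GCPtrPC, abBytes, sizeof(abBytes));
#endif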
14170
14171VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14172{
14173 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14174
14175 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14176 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14177 if (rcStrict == VINF_SUCCESS)
14178 {
14179 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14180 if (pcbWritten)
14181 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14182 }
14183 else if (pVCpu->iem.s.cActiveMappings > 0)
14184 iemMemRollback(pVCpu);
14185
14186#ifdef IN_RC
14187 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14188#endif
14189 return rcStrict;
14190}
14191
14192
14193VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14194 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14195{
14196 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14197
14198 VBOXSTRICTRC rcStrict;
14199 if ( cbOpcodeBytes
14200 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14201 {
14202 iemInitDecoder(pVCpu, true);
14203#ifdef IEM_WITH_CODE_TLB
14204 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14205 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14206 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14207 pVCpu->iem.s.offCurInstrStart = 0;
14208 pVCpu->iem.s.offInstrNextByte = 0;
14209#else
14210 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14211 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14212#endif
14213 rcStrict = VINF_SUCCESS;
14214 }
14215 else
14216 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14217 if (rcStrict == VINF_SUCCESS)
14218 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14219 else if (pVCpu->iem.s.cActiveMappings > 0)
14220 iemMemRollback(pVCpu);
14221
14222#ifdef IN_RC
14223 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14224#endif
14225 return rcStrict;
14226}
14227
14228
14229/**
14230 * For debugging DISGetParamSize, may come in handy.
14231 *
14232 * @returns Strict VBox status code.
14233 * @param pVCpu The cross context virtual CPU structure of the
14234 * calling EMT.
14235 * @param pCtxCore The context core structure.
14236 * @param OpcodeBytesPC The PC of the opcode bytes.
14237 * @param pvOpcodeBytes Prefetched opcode bytes.
14238 * @param cbOpcodeBytes Number of prefetched bytes.
14239 * @param pcbWritten Where to return the number of bytes written.
14240 * Optional.
14241 */
14242VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14243 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14244 uint32_t *pcbWritten)
14245{
14246 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14247
14248 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14249 VBOXSTRICTRC rcStrict;
14250 if ( cbOpcodeBytes
14251 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14252 {
14253 iemInitDecoder(pVCpu, true);
14254#ifdef IEM_WITH_CODE_TLB
14255 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14256 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14257 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14258 pVCpu->iem.s.offCurInstrStart = 0;
14259 pVCpu->iem.s.offInstrNextByte = 0;
14260#else
14261 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14262 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14263#endif
14264 rcStrict = VINF_SUCCESS;
14265 }
14266 else
14267 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14268 if (rcStrict == VINF_SUCCESS)
14269 {
14270 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14271 if (pcbWritten)
14272 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14273 }
14274 else if (pVCpu->iem.s.cActiveMappings > 0)
14275 iemMemRollback(pVCpu);
14276
14277#ifdef IN_RC
14278 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14279#endif
14280 return rcStrict;
14281}
14282
14283
14284VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14285{
14286 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14287
14288 /*
14289 * See if there is an interrupt pending in TRPM, inject it if we can.
14290 */
14291 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14292#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14293 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14294 if (fIntrEnabled)
14295 {
14296 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14297 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14298 else
14299 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14300 }
14301#else
14302 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14303#endif
14304 if ( fIntrEnabled
14305 && TRPMHasTrap(pVCpu)
14306 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14307 {
14308 uint8_t u8TrapNo;
14309 TRPMEVENT enmType;
14310 RTGCUINT uErrCode;
14311 RTGCPTR uCr2;
14312 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14313 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14314 TRPMResetTrap(pVCpu);
14315 }
14316
14317 /*
14318 * Initial decoder init w/ prefetch, then set up setjmp.
14319 */
14320 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14321 if (rcStrict == VINF_SUCCESS)
14322 {
14323#ifdef IEM_WITH_SETJMP
14324 jmp_buf JmpBuf;
14325 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14326 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14327 pVCpu->iem.s.cActiveMappings = 0;
14328 if ((rcStrict = setjmp(JmpBuf)) == 0)
14329#endif
14330 {
14331 /*
14332 * The run loop. We limit ourselves to 4096 instructions right now.
14333 */
14334 PVM pVM = pVCpu->CTX_SUFF(pVM);
14335 uint32_t cInstr = 4096;
14336 for (;;)
14337 {
14338 /*
14339 * Log the state.
14340 */
14341#ifdef LOG_ENABLED
14342 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14343#endif
14344
14345 /*
14346 * Do the decoding and emulation.
14347 */
14348 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14349 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14350 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14351 {
14352 Assert(pVCpu->iem.s.cActiveMappings == 0);
14353 pVCpu->iem.s.cInstructions++;
14354 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14355 {
14356 uint64_t fCpu = pVCpu->fLocalForcedActions
14357 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14358 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14359 | VMCPU_FF_TLB_FLUSH
14360#ifdef VBOX_WITH_RAW_MODE
14361 | VMCPU_FF_TRPM_SYNC_IDT
14362 | VMCPU_FF_SELM_SYNC_TSS
14363 | VMCPU_FF_SELM_SYNC_GDT
14364 | VMCPU_FF_SELM_SYNC_LDT
14365#endif
14366 | VMCPU_FF_INHIBIT_INTERRUPTS
14367 | VMCPU_FF_BLOCK_NMIS
14368 | VMCPU_FF_UNHALT ));
14369
14370 if (RT_LIKELY( ( !fCpu
14371 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14372 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14373 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14374 {
14375 if (cInstr-- > 0)
14376 {
14377 Assert(pVCpu->iem.s.cActiveMappings == 0);
14378 iemReInitDecoder(pVCpu);
14379 continue;
14380 }
14381 }
14382 }
14383 Assert(pVCpu->iem.s.cActiveMappings == 0);
14384 }
14385 else if (pVCpu->iem.s.cActiveMappings > 0)
14386 iemMemRollback(pVCpu);
14387 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14388 break;
14389 }
14390 }
14391#ifdef IEM_WITH_SETJMP
14392 else
14393 {
14394 if (pVCpu->iem.s.cActiveMappings > 0)
14395 iemMemRollback(pVCpu);
14396 pVCpu->iem.s.cLongJumps++;
14397 }
14398 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14399#endif
14400
14401 /*
14402 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14403 */
14404 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14405 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14406 }
14407 else
14408 {
14409 if (pVCpu->iem.s.cActiveMappings > 0)
14410 iemMemRollback(pVCpu);
14411
14412#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14413 /*
14414 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14415 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14416 */
14417 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14418#endif
14419 }
14420
14421 /*
14422 * Maybe re-enter raw-mode and log.
14423 */
14424#ifdef IN_RC
14425 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14426#endif
14427 if (rcStrict != VINF_SUCCESS)
14428 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14429 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14430 if (pcInstructions)
14431 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14432 return rcStrict;
14433}
14434
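/*
 * Caller sketch (EM-style), illustration only; not built.
 */
#if 0
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    /* Even on VINF_SUCCESS the caller must service pending VM/VMCPU force-flags
       before calling again, since pending FFs are one of the reasons for returning. */
#endif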
14435
14436/**
14437 * Interface used by EMExecuteExec, does exit statistics and limits.
14438 *
14439 * @returns Strict VBox status code.
14440 * @param pVCpu The cross context virtual CPU structure.
14441 * @param fWillExit To be defined.
14442 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14443 * @param cMaxInstructions Maximum number of instructions to execute.
14444 * @param cMaxInstructionsWithoutExits
14445 * The max number of instructions without exits.
14446 * @param pStats Where to return statistics.
14447 */
14448VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14449 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14450{
14451 NOREF(fWillExit); /** @todo define flexible exit crits */
14452
14453 /*
14454 * Initialize return stats.
14455 */
14456 pStats->cInstructions = 0;
14457 pStats->cExits = 0;
14458 pStats->cMaxExitDistance = 0;
14459 pStats->cReserved = 0;
14460
14461 /*
14462 * Initial decoder init w/ prefetch, then set up setjmp.
14463 */
14464 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14465 if (rcStrict == VINF_SUCCESS)
14466 {
14467#ifdef IEM_WITH_SETJMP
14468 jmp_buf JmpBuf;
14469 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14470 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14471 pVCpu->iem.s.cActiveMappings = 0;
14472 if ((rcStrict = setjmp(JmpBuf)) == 0)
14473#endif
14474 {
14475#ifdef IN_RING0
14476 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14477#endif
14478 uint32_t cInstructionSinceLastExit = 0;
14479
14480 /*
14481 * The run loop. Limits are enforced via cMaxInstructions and cMaxInstructionsWithoutExits below.
14482 */
14483 PVM pVM = pVCpu->CTX_SUFF(pVM);
14484 for (;;)
14485 {
14486 /*
14487 * Log the state.
14488 */
14489#ifdef LOG_ENABLED
14490 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14491#endif
14492
14493 /*
14494 * Do the decoding and emulation.
14495 */
14496 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14497
14498 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14499 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14500
14501 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14502 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14503 {
14504 pStats->cExits += 1;
14505 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14506 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14507 cInstructionSinceLastExit = 0;
14508 }
14509
14510 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14511 {
14512 Assert(pVCpu->iem.s.cActiveMappings == 0);
14513 pVCpu->iem.s.cInstructions++;
14514 pStats->cInstructions++;
14515 cInstructionSinceLastExit++;
14516 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14517 {
14518 uint64_t fCpu = pVCpu->fLocalForcedActions
14519 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14520 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14521 | VMCPU_FF_TLB_FLUSH
14522#ifdef VBOX_WITH_RAW_MODE
14523 | VMCPU_FF_TRPM_SYNC_IDT
14524 | VMCPU_FF_SELM_SYNC_TSS
14525 | VMCPU_FF_SELM_SYNC_GDT
14526 | VMCPU_FF_SELM_SYNC_LDT
14527#endif
14528 | VMCPU_FF_INHIBIT_INTERRUPTS
14529 | VMCPU_FF_BLOCK_NMIS
14530 | VMCPU_FF_UNHALT ));
14531
14532 if (RT_LIKELY( ( ( !fCpu
14533 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14534 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14535 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14536 || pStats->cInstructions < cMinInstructions))
14537 {
14538 if (pStats->cInstructions < cMaxInstructions)
14539 {
14540 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14541 {
14542#ifdef IN_RING0
14543 if ( !fCheckPreemptionPending
14544 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14545#endif
14546 {
14547 Assert(pVCpu->iem.s.cActiveMappings == 0);
14548 iemReInitDecoder(pVCpu);
14549 continue;
14550 }
14551#ifdef IN_RING0
14552 rcStrict = VINF_EM_RAW_INTERRUPT;
14553 break;
14554#endif
14555 }
14556 }
14557 }
14558 Assert(!(fCpu & VMCPU_FF_IEM));
14559 }
14560 Assert(pVCpu->iem.s.cActiveMappings == 0);
14561 }
14562 else if (pVCpu->iem.s.cActiveMappings > 0)
14563 iemMemRollback(pVCpu);
14564 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14565 break;
14566 }
14567 }
14568#ifdef IEM_WITH_SETJMP
14569 else
14570 {
14571 if (pVCpu->iem.s.cActiveMappings > 0)
14572 iemMemRollback(pVCpu);
14573 pVCpu->iem.s.cLongJumps++;
14574 }
14575 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14576#endif
14577
14578 /*
14579 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14580 */
14581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14582 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14583 }
14584 else
14585 {
14586 if (pVCpu->iem.s.cActiveMappings > 0)
14587 iemMemRollback(pVCpu);
14588
14589#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14590 /*
14591 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14592 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14593 */
14594 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14595#endif
14596 }
14597
14598 /*
14599 * Maybe re-enter raw-mode and log.
14600 */
14601#ifdef IN_RC
14602 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14603#endif
14604 if (rcStrict != VINF_SUCCESS)
14605 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14606 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14607 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14608 return rcStrict;
14609}
14610
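/*
 * Usage sketch, illustration only; not built.  The numeric limits below are
 * arbitrary example values.
 */
#if 0
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/,
                                            1024 /*cMaxInstructions*/, 32 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    /* Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance describe the burst just executed. */
#endif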
14611
14612/**
14613 * Injects a trap, fault, abort, software interrupt or external interrupt.
14614 *
14615 * The parameter list matches TRPMQueryTrapAll pretty closely.
14616 *
14617 * @returns Strict VBox status code.
14618 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14619 * @param u8TrapNo The trap number.
14620 * @param enmType What type is it (trap/fault/abort), software
14621 * interrupt or hardware interrupt.
14622 * @param uErrCode The error code if applicable.
14623 * @param uCr2 The CR2 value if applicable.
14624 * @param cbInstr The instruction length (only relevant for
14625 * software interrupts).
14626 */
14627VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14628 uint8_t cbInstr)
14629{
14630 iemInitDecoder(pVCpu, false);
14631#ifdef DBGFTRACE_ENABLED
14632 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14633 u8TrapNo, enmType, uErrCode, uCr2);
14634#endif
14635
14636 uint32_t fFlags;
14637 switch (enmType)
14638 {
14639 case TRPM_HARDWARE_INT:
14640 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14641 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14642 uErrCode = uCr2 = 0;
14643 break;
14644
14645 case TRPM_SOFTWARE_INT:
14646 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14647 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14648 uErrCode = uCr2 = 0;
14649 break;
14650
14651 case TRPM_TRAP:
14652 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14653 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14654 if (u8TrapNo == X86_XCPT_PF)
14655 fFlags |= IEM_XCPT_FLAGS_CR2;
14656 switch (u8TrapNo)
14657 {
14658 case X86_XCPT_DF:
14659 case X86_XCPT_TS:
14660 case X86_XCPT_NP:
14661 case X86_XCPT_SS:
14662 case X86_XCPT_PF:
14663 case X86_XCPT_AC:
14664 fFlags |= IEM_XCPT_FLAGS_ERR;
14665 break;
14666
14667 case X86_XCPT_NMI:
14668 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14669 break;
14670 }
14671 break;
14672
14673 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14674 }
14675
14676 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14677
14678 if (pVCpu->iem.s.cActiveMappings > 0)
14679 iemMemRollback(pVCpu);
14680
14681 return rcStrict;
14682}
14683
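/*
 * Injection sketches, illustration only; not built.  uErrCode and GCPtrFault are
 * placeholder variables for values the caller obtained elsewhere (e.g. from TRPM).
 */
#if 0
    /* External hardware interrupt, vector 0x20: error code and CR2 are ignored. */
    VBOXSTRICTRC rcStrict1 = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0 /*cbInstr*/);
    /* Page fault: the error code and CR2 are forwarded (see the X86_XCPT_PF handling above). */
    VBOXSTRICTRC rcStrict2 = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
#endif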
14684
14685/**
14686 * Injects the active TRPM event.
14687 *
14688 * @returns Strict VBox status code.
14689 * @param pVCpu The cross context virtual CPU structure.
14690 */
14691VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14692{
14693#ifndef IEM_IMPLEMENTS_TASKSWITCH
14694 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14695#else
14696 uint8_t u8TrapNo;
14697 TRPMEVENT enmType;
14698 RTGCUINT uErrCode;
14699 RTGCUINTPTR uCr2;
14700 uint8_t cbInstr;
14701 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14702 if (RT_FAILURE(rc))
14703 return rc;
14704
14705 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14706# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14707 if (rcStrict == VINF_SVM_VMEXIT)
14708 rcStrict = VINF_SUCCESS;
14709# endif
14710
14711 /** @todo Are there any other codes that imply the event was successfully
14712 * delivered to the guest? See @bugref{6607}. */
14713 if ( rcStrict == VINF_SUCCESS
14714 || rcStrict == VINF_IEM_RAISED_XCPT)
14715 TRPMResetTrap(pVCpu);
14716
14717 return rcStrict;
14718#endif
14719}
14720
14721
14722VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14723{
14724 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14725 return VERR_NOT_IMPLEMENTED;
14726}
14727
14728
14729VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14730{
14731 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14732 return VERR_NOT_IMPLEMENTED;
14733}
14734
14735
14736#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14737/**
14738 * Executes an IRET instruction with the default operand size.
14739 *
14740 * This is for PATM.
14741 *
14742 * @returns VBox status code.
14743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14744 * @param pCtxCore The register frame.
14745 */
14746VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14747{
14748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14749
14750 iemCtxCoreToCtx(pCtx, pCtxCore);
14751 iemInitDecoder(pVCpu);
14752 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14753 if (rcStrict == VINF_SUCCESS)
14754 iemCtxToCtxCore(pCtxCore, pCtx);
14755 else
14756 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14757 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14758 return rcStrict;
14759}
14760#endif
14761
14762
14763/**
14764 * Macro used by the IEMExec* method to check the given instruction length.
14765 *
14766 * Will return on failure!
14767 *
14768 * @param a_cbInstr The given instruction length.
14769 * @param a_cbMin The minimum length.
14770 */
14771#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14772 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14773 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
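
/* The single unsigned comparison above is a range check for
   a_cbMin <= a_cbInstr <= 15.  For example, with a_cbMin=2 the right-hand side
   is 13: a_cbInstr=1 wraps around to a huge unsigned value and fails,
   a_cbInstr=16 gives 14 and fails, while lengths 2 through 15 pass. */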
14774
14775
14776/**
14777 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14778 *
14779 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14780 *
14781 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14783 * @param rcStrict The status code to fiddle.
14784 */
14785DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14786{
14787 iemUninitExec(pVCpu);
14788#ifdef IN_RC
14789 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14790#else
14791 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14792#endif
14793}
14794
14795
14796/**
14797 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14798 *
14799 * This API ASSUMES that the caller has already verified that the guest code is
14800 * allowed to access the I/O port. (The I/O port is in the DX register in the
14801 * guest state.)
14802 *
14803 * @returns Strict VBox status code.
14804 * @param pVCpu The cross context virtual CPU structure.
14805 * @param cbValue The size of the I/O port access (1, 2, or 4).
14806 * @param enmAddrMode The addressing mode.
14807 * @param fRepPrefix Indicates whether a repeat prefix is used
14808 * (doesn't matter which for this instruction).
14809 * @param cbInstr The instruction length in bytes.
14810 * @param iEffSeg The effective segment register index.
14811 * @param fIoChecked Whether the access to the I/O port has been
14812 * checked or not. It's typically checked in the
14813 * HM scenario.
14814 */
14815VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14816 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14817{
14818 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14819 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14820
14821 /*
14822 * State init.
14823 */
14824 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14825
14826 /*
14827 * Switch orgy for getting to the right handler.
14828 */
14829 VBOXSTRICTRC rcStrict;
14830 if (fRepPrefix)
14831 {
14832 switch (enmAddrMode)
14833 {
14834 case IEMMODE_16BIT:
14835 switch (cbValue)
14836 {
14837 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14838 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14839 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14840 default:
14841 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14842 }
14843 break;
14844
14845 case IEMMODE_32BIT:
14846 switch (cbValue)
14847 {
14848 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14849 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14850 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14851 default:
14852 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14853 }
14854 break;
14855
14856 case IEMMODE_64BIT:
14857 switch (cbValue)
14858 {
14859 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14860 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14861 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14862 default:
14863 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14864 }
14865 break;
14866
14867 default:
14868 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14869 }
14870 }
14871 else
14872 {
14873 switch (enmAddrMode)
14874 {
14875 case IEMMODE_16BIT:
14876 switch (cbValue)
14877 {
14878 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14879 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14880 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14881 default:
14882 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14883 }
14884 break;
14885
14886 case IEMMODE_32BIT:
14887 switch (cbValue)
14888 {
14889 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14890 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14891 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14892 default:
14893 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14894 }
14895 break;
14896
14897 case IEMMODE_64BIT:
14898 switch (cbValue)
14899 {
14900 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14901 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14902 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14903 default:
14904 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14905 }
14906 break;
14907
14908 default:
14909 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14910 }
14911 }
14912
14913 if (pVCpu->iem.s.cActiveMappings)
14914 iemMemRollback(pVCpu);
14915
14916 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14917}
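
/*
 * Illustrative usage sketch (hypothetical caller, excluded from the build):
 * how HM might forward a decoded "rep outsb" intercept to the string I/O
 * writer above.  The wrapper name is invented; the argument values follow the
 * parameter documentation of IEMExecStringIoWrite.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* One byte per access, 32-bit address size, REP prefix present, DS as the
       effective segment, I/O permissions already checked (typical for HM). */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif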
14918
14919
14920/**
14921 * Interface for HM and EM for executing string I/O IN (read) instructions.
14922 *
14923 * This API ASSUMES that the caller has already verified that the guest code is
14924 * allowed to access the I/O port. (The I/O port is in the DX register in the
14925 * guest state.)
14926 *
14927 * @returns Strict VBox status code.
14928 * @param pVCpu The cross context virtual CPU structure.
14929 * @param cbValue The size of the I/O port access (1, 2, or 4).
14930 * @param enmAddrMode The addressing mode.
14931 * @param fRepPrefix Indicates whether a repeat prefix is used
14932 * (doesn't matter which for this instruction).
14933 * @param cbInstr The instruction length in bytes.
14934 * @param fIoChecked Whether the access to the I/O port has been
14935 * checked or not. It's typically checked in the
14936 * HM scenario.
14937 */
14938VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14939 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14940{
14941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14942
14943 /*
14944 * State init.
14945 */
14946 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14947
14948 /*
14949 * Switch orgy for getting to the right handler.
14950 */
14951 VBOXSTRICTRC rcStrict;
14952 if (fRepPrefix)
14953 {
14954 switch (enmAddrMode)
14955 {
14956 case IEMMODE_16BIT:
14957 switch (cbValue)
14958 {
14959 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14960 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14961 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14962 default:
14963 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14964 }
14965 break;
14966
14967 case IEMMODE_32BIT:
14968 switch (cbValue)
14969 {
14970 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14971 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14972 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14973 default:
14974 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14975 }
14976 break;
14977
14978 case IEMMODE_64BIT:
14979 switch (cbValue)
14980 {
14981 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14982 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14983 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14984 default:
14985 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14986 }
14987 break;
14988
14989 default:
14990 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14991 }
14992 }
14993 else
14994 {
14995 switch (enmAddrMode)
14996 {
14997 case IEMMODE_16BIT:
14998 switch (cbValue)
14999 {
15000 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15001 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15002 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15003 default:
15004 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15005 }
15006 break;
15007
15008 case IEMMODE_32BIT:
15009 switch (cbValue)
15010 {
15011 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15012 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15013 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15014 default:
15015 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15016 }
15017 break;
15018
15019 case IEMMODE_64BIT:
15020 switch (cbValue)
15021 {
15022 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15023 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15024 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15025 default:
15026 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15027 }
15028 break;
15029
15030 default:
15031 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15032 }
15033 }
15034
15035 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15036 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15037}
15038
15039
15040/**
15041 * Interface for raw-mode to execute an OUT (write) instruction.
15042 *
15043 * @returns Strict VBox status code.
15044 * @param pVCpu The cross context virtual CPU structure.
15045 * @param cbInstr The instruction length in bytes.
15046 * @param u16Port The port to write to.
15047 * @param fImm Whether the port is specified using an immediate operand or
15048 * using the implicit DX register.
15049 * @param cbReg The register size.
15050 *
15051 * @remarks In ring-0 not all of the state needs to be synced in.
15052 */
15053VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15054{
15055 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15056 Assert(cbReg <= 4 && cbReg != 3);
15057
15058 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15059 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15060 Assert(!pVCpu->iem.s.cActiveMappings);
15061 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15062}
15063
15064
15065/**
15066 * Interface for raw-mode to execute an IN (read) instruction.
15067 *
15068 * @returns Strict VBox status code.
15069 * @param pVCpu The cross context virtual CPU structure.
15070 * @param cbInstr The instruction length in bytes.
15071 * @param u16Port The port to read.
15072 * @param fImm Whether the port is specified using an immediate operand or
15073 * using the implicit DX.
15074 * @param cbReg The register size.
15075 */
15076VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15077{
15078 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15079 Assert(cbReg <= 4 && cbReg != 3);
15080
15081 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15082 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15083 Assert(!pVCpu->iem.s.cActiveMappings);
15084 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15085}
15086
15087
15088/**
15089 * Interface for HM and EM to write to a CRx register.
15090 *
15091 * @returns Strict VBox status code.
15092 * @param pVCpu The cross context virtual CPU structure.
15093 * @param cbInstr The instruction length in bytes.
15094 * @param iCrReg The control register number (destination).
15095 * @param iGReg The general purpose register number (source).
15096 *
15097 * @remarks In ring-0 not all of the state needs to be synced in.
15098 */
15099VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15100{
15101 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15102 Assert(iCrReg < 16);
15103 Assert(iGReg < 16);
15104
15105 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15106 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15107 Assert(!pVCpu->iem.s.cActiveMappings);
15108 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15109}
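
/*
 * Illustrative usage sketch (hypothetical, excluded from the build): the
 * IEMExecDecoded* helpers below all follow the pattern above - iemInitExec,
 * one IEM_CIMPL_CALL_*, then uninit/status fiddling.  A caller that
 * intercepted "mov cr3, rax" (a 3 byte 0F 22 /r encoding) could forward it
 * like this; the register and CR indices are example values.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, 0 /*iGReg=RAX*/);
}
#endif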
15110
15111
15112/**
15113 * Interface for HM and EM to read from a CRx register.
15114 *
15115 * @returns Strict VBox status code.
15116 * @param pVCpu The cross context virtual CPU structure.
15117 * @param cbInstr The instruction length in bytes.
15118 * @param iGReg The general purpose register number (destination).
15119 * @param iCrReg The control register number (source).
15120 *
15121 * @remarks In ring-0 not all of the state needs to be synced in.
15122 */
15123VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15124{
15125 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15126 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15127 | CPUMCTX_EXTRN_APIC_TPR);
15128 Assert(iCrReg < 16);
15129 Assert(iGReg < 16);
15130
15131 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15132 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15133 Assert(!pVCpu->iem.s.cActiveMappings);
15134 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15135}
15136
15137
15138/**
15139 * Interface for HM and EM to clear the CR0[TS] bit.
15140 *
15141 * @returns Strict VBox status code.
15142 * @param pVCpu The cross context virtual CPU structure.
15143 * @param cbInstr The instruction length in bytes.
15144 *
15145 * @remarks In ring-0 not all of the state needs to be synced in.
15146 */
15147VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15148{
15149 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15150
15151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15152 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15153 Assert(!pVCpu->iem.s.cActiveMappings);
15154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15155}
15156
15157
15158/**
15159 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15160 *
15161 * @returns Strict VBox status code.
15162 * @param pVCpu The cross context virtual CPU structure.
15163 * @param cbInstr The instruction length in bytes.
15164 * @param uValue The value to load into CR0.
15165 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15166 * memory operand. Otherwise pass NIL_RTGCPTR.
15167 *
15168 * @remarks In ring-0 not all of the state needs to be synced in.
15169 */
15170VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15171{
15172 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15173
15174 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15175 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15176 Assert(!pVCpu->iem.s.cActiveMappings);
15177 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15178}
15179
15180
15181/**
15182 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15183 *
15184 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15185 *
15186 * @returns Strict VBox status code.
15187 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15188 * @param cbInstr The instruction length in bytes.
15189 * @remarks In ring-0 not all of the state needs to be synced in.
15190 * @thread EMT(pVCpu)
15191 */
15192VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15193{
15194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15195
15196 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15197 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15198 Assert(!pVCpu->iem.s.cActiveMappings);
15199 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15200}
15201
15202
15203/**
15204 * Interface for HM and EM to emulate the WBINVD instruction.
15205 *
15206 * @returns Strict VBox status code.
15207 * @param pVCpu The cross context virtual CPU structure.
15208 * @param cbInstr The instruction length in bytes.
15209 *
15210 * @remarks In ring-0 not all of the state needs to be synced in.
15211 */
15212VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15213{
15214 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15215
15216 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15217 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15218 Assert(!pVCpu->iem.s.cActiveMappings);
15219 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15220}
15221
15222
15223/**
15224 * Interface for HM and EM to emulate the INVD instruction.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure.
15228 * @param cbInstr The instruction length in bytes.
15229 *
15230 * @remarks In ring-0 not all of the state needs to be synced in.
15231 */
15232VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15233{
15234 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15235
15236 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15237 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15238 Assert(!pVCpu->iem.s.cActiveMappings);
15239 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15240}
15241
15242
15243/**
15244 * Interface for HM and EM to emulate the INVLPG instruction.
15245 *
15246 * @returns Strict VBox status code.
15247 * @retval VINF_PGM_SYNC_CR3
15248 *
15249 * @param pVCpu The cross context virtual CPU structure.
15250 * @param cbInstr The instruction length in bytes.
15251 * @param GCPtrPage The effective address of the page to invalidate.
15252 *
15253 * @remarks In ring-0 not all of the state needs to be synced in.
15254 */
15255VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15256{
15257 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15258
15259 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15260 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15261 Assert(!pVCpu->iem.s.cActiveMappings);
15262 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15263}
15264
15265
15266/**
15267 * Interface for HM and EM to emulate the CPUID instruction.
15268 *
15269 * @returns Strict VBox status code.
15270 *
15271 * @param pVCpu The cross context virtual CPU structure.
15272 * @param cbInstr The instruction length in bytes.
15273 *
15274 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15275 */
15276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15277{
15278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15279 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15280
15281 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15283 Assert(!pVCpu->iem.s.cActiveMappings);
15284 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15285}
15286
15287
15288/**
15289 * Interface for HM and EM to emulate the RDPMC instruction.
15290 *
15291 * @returns Strict VBox status code.
15292 *
15293 * @param pVCpu The cross context virtual CPU structure.
15294 * @param cbInstr The instruction length in bytes.
15295 *
15296 * @remarks Not all of the state needs to be synced in.
15297 */
15298VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15299{
15300 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15301 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15302
15303 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15304 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15305 Assert(!pVCpu->iem.s.cActiveMappings);
15306 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15307}
15308
15309
15310/**
15311 * Interface for HM and EM to emulate the RDTSC instruction.
15312 *
15313 * @returns Strict VBox status code.
15314 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15315 *
15316 * @param pVCpu The cross context virtual CPU structure.
15317 * @param cbInstr The instruction length in bytes.
15318 *
15319 * @remarks Not all of the state needs to be synced in.
15320 */
15321VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15322{
15323 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15324 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15325
15326 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15327 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15328 Assert(!pVCpu->iem.s.cActiveMappings);
15329 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15330}
15331
15332
15333/**
15334 * Interface for HM and EM to emulate the RDTSCP instruction.
15335 *
15336 * @returns Strict VBox status code.
15337 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15338 *
15339 * @param pVCpu The cross context virtual CPU structure.
15340 * @param cbInstr The instruction length in bytes.
15341 *
15342 * @remarks Not all of the state needs to be synced in. Recommended
15343 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15344 */
15345VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15346{
15347 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15348 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the RDMSR instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 *
15366 * @remarks Not all of the state needs to be synced in. Requires RCX and
15367 * (currently) all MSRs.
15368 */
15369VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15370{
15371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15372 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15373
15374 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15375 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15376 Assert(!pVCpu->iem.s.cActiveMappings);
15377 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15378}
15379
15380
15381/**
15382 * Interface for HM and EM to emulate the WRMSR instruction.
15383 *
15384 * @returns Strict VBox status code.
15385 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15386 *
15387 * @param pVCpu The cross context virtual CPU structure.
15388 * @param cbInstr The instruction length in bytes.
15389 *
15390 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15391 * and (currently) all MSRs.
15392 */
15393VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15394{
15395 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15397 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15398
15399 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15400 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15401 Assert(!pVCpu->iem.s.cActiveMappings);
15402 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15403}
15404
15405
15406/**
15407 * Interface for HM and EM to emulate the MONITOR instruction.
15408 *
15409 * @returns Strict VBox status code.
15410 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15411 *
15412 * @param pVCpu The cross context virtual CPU structure.
15413 * @param cbInstr The instruction length in bytes.
15414 *
15415 * @remarks Not all of the state needs to be synced in.
15416 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15417 * are used.
15418 */
15419VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15420{
15421 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15422 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15423
15424 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15425 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15426 Assert(!pVCpu->iem.s.cActiveMappings);
15427 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15428}
15429
15430
15431/**
15432 * Interface for HM and EM to emulate the MWAIT instruction.
15433 *
15434 * @returns Strict VBox status code.
15435 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15436 *
15437 * @param pVCpu The cross context virtual CPU structure.
15438 * @param cbInstr The instruction length in bytes.
15439 *
15440 * @remarks Not all of the state needs to be synced in.
15441 */
15442VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15443{
15444 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15445
15446 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15447 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15448 Assert(!pVCpu->iem.s.cActiveMappings);
15449 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15450}
15451
15452
15453/**
15454 * Interface for HM and EM to emulate the HLT instruction.
15455 *
15456 * @returns Strict VBox status code.
15457 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15458 *
15459 * @param pVCpu The cross context virtual CPU structure.
15460 * @param cbInstr The instruction length in bytes.
15461 *
15462 * @remarks Not all of the state needs to be synced in.
15463 */
15464VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15465{
15466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15467
15468 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15469 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15470 Assert(!pVCpu->iem.s.cActiveMappings);
15471 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15472}
15473
15474
15475/**
15476 * Checks if IEM is in the process of delivering an event (interrupt or
15477 * exception).
15478 *
15479 * @returns true if we're in the process of raising an interrupt or exception,
15480 * false otherwise.
15481 * @param pVCpu The cross context virtual CPU structure.
15482 * @param puVector Where to store the vector associated with the
15483 * currently delivered event, optional.
15484 * @param pfFlags Where to store the event delivery flags (see
15485 * IEM_XCPT_FLAGS_XXX), optional.
15486 * @param puErr Where to store the error code associated with the
15487 * event, optional.
15488 * @param puCr2 Where to store the CR2 associated with the event,
15489 * optional.
15490 * @remarks The caller should check the flags to determine if the error code and
15491 * CR2 are valid for the event.
15492 */
15493VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15494{
15495 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15496 if (fRaisingXcpt)
15497 {
15498 if (puVector)
15499 *puVector = pVCpu->iem.s.uCurXcpt;
15500 if (pfFlags)
15501 *pfFlags = pVCpu->iem.s.fCurXcpt;
15502 if (puErr)
15503 *puErr = pVCpu->iem.s.uCurXcptErr;
15504 if (puCr2)
15505 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15506 }
15507 return fRaisingXcpt;
15508}
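
/*
 * Illustrative usage sketch (hypothetical, excluded from the build): querying
 * IEM for an event that is in the middle of being delivered, e.g. when a
 * nested intercept needs the original vector.  The logging is only for the
 * example; the flag checks use the IEM_XCPT_FLAGS_* values reported above.
 */
#if 0 /* example only */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x (flags %#x)%s%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " with error code" : "",
             (fFlags & IEM_XCPT_FLAGS_CR2) ? " and CR2" : ""));
}
#endif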
15509
15510#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15511
15512/**
15513 * Interface for HM and EM to emulate the CLGI instruction.
15514 *
15515 * @returns Strict VBox status code.
15516 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15517 * @param cbInstr The instruction length in bytes.
15518 * @thread EMT(pVCpu)
15519 */
15520VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15521{
15522 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15523
15524 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15525 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15526 Assert(!pVCpu->iem.s.cActiveMappings);
15527 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15528}
15529
15530
15531/**
15532 * Interface for HM and EM to emulate the STGI instruction.
15533 *
15534 * @returns Strict VBox status code.
15535 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15536 * @param cbInstr The instruction length in bytes.
15537 * @thread EMT(pVCpu)
15538 */
15539VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15540{
15541 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15542
15543 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15544 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15545 Assert(!pVCpu->iem.s.cActiveMappings);
15546 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15547}
15548
15549
15550/**
15551 * Interface for HM and EM to emulate the VMLOAD instruction.
15552 *
15553 * @returns Strict VBox status code.
15554 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15555 * @param cbInstr The instruction length in bytes.
15556 * @thread EMT(pVCpu)
15557 */
15558VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15559{
15560 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15561
15562 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15563 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15564 Assert(!pVCpu->iem.s.cActiveMappings);
15565 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15566}
15567
15568
15569/**
15570 * Interface for HM and EM to emulate the VMSAVE instruction.
15571 *
15572 * @returns Strict VBox status code.
15573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15574 * @param cbInstr The instruction length in bytes.
15575 * @thread EMT(pVCpu)
15576 */
15577VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15578{
15579 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15580
15581 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15582 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15583 Assert(!pVCpu->iem.s.cActiveMappings);
15584 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15585}
15586
15587
15588/**
15589 * Interface for HM and EM to emulate the INVLPGA instruction.
15590 *
15591 * @returns Strict VBox status code.
15592 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15593 * @param cbInstr The instruction length in bytes.
15594 * @thread EMT(pVCpu)
15595 */
15596VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15597{
15598 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15599
15600 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15601 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15602 Assert(!pVCpu->iem.s.cActiveMappings);
15603 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15604}
15605
15606
15607/**
15608 * Interface for HM and EM to emulate the VMRUN instruction.
15609 *
15610 * @returns Strict VBox status code.
15611 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15612 * @param cbInstr The instruction length in bytes.
15613 * @thread EMT(pVCpu)
15614 */
15615VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15616{
15617 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15618 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15619
15620 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15621 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15622 Assert(!pVCpu->iem.s.cActiveMappings);
15623 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15624}
15625
15626
15627/**
15628 * Interface for HM and EM to emulate \#VMEXIT.
15629 *
15630 * @returns Strict VBox status code.
15631 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15632 * @param uExitCode The exit code.
15633 * @param uExitInfo1 The exit info. 1 field.
15634 * @param uExitInfo2 The exit info. 2 field.
15635 * @thread EMT(pVCpu)
15636 */
15637VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15638{
15639 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15640 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15641 if (pVCpu->iem.s.cActiveMappings)
15642 iemMemRollback(pVCpu);
15643 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15644}
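
/*
 * Illustrative usage sketch (hypothetical, excluded from the build): signalling
 * a CPUID intercept #VMEXIT for the nested guest.  SVM_EXIT_CPUID is assumed to
 * be the exit-code constant and both exit-info fields are assumed to be zero
 * for this intercept; only IEMExecSvmVmexit itself comes from this file.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleSvmCpuidIntercept(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif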
15645
15646#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15647
15648#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15649
15650/**
15651 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15652 *
15653 * @returns Strict VBox status code.
15654 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15655 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15656 * the x2APIC device.
15657 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15658 *
15659 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15660 * @param idMsr The MSR being read.
15661 * @param pu64Value Pointer to the value being written or where to store the
15662 * value being read.
15663 * @param fWrite Whether this is an MSR write or read access.
15664 * @thread EMT(pVCpu)
15665 */
15666VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15667{
15668 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15669 Assert(pu64Value);
15670
15671 VBOXSTRICTRC rcStrict;
15672 if (!fWrite)
15673 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15674 else
15675 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15676 if (pVCpu->iem.s.cActiveMappings)
15677 iemMemRollback(pVCpu);
15678 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15680}
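
/*
 * Illustrative usage sketch (hypothetical, excluded from the build): routing an
 * intercepted x2APIC MSR write through the virtual-APIC MSR emulation above,
 * dispatching on the documented return values.  The wrapper name is invented.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleHandleX2ApicMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &u64Value, true /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS; /* The write was virtualized, nothing more to do. */
    /* VINF_VMX_INTERCEPT_NOT_ACTIVE: hand the access to the x2APIC device.
       Failure: the caller is expected to raise #GP(0). */
    return rcStrict;
}
#endif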
15681
15682
15683/**
15684 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15685 *
15686 * @returns Strict VBox status code.
15687 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15688 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15689 *
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param offAccess The offset of the register being accessed (within the
15692 * APIC-access page).
15693 * @param cbAccess The size of the access in bytes.
15694 * @param pvData Pointer to the data being written or where to store the data
15695 * being read.
15696 * @param fWrite Whether this is a write or read access.
15697 * @thread EMT(pVCpu)
15698 */
15699VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15700 bool fWrite)
15701{
15702 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15703 Assert(pvData);
15704
15705 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15706 * accesses, so we only use read/write here. Maybe in the future the PGM
15707 * physical handler will be extended to include this information? */
15708 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15709 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15710 if (pVCpu->iem.s.cActiveMappings)
15711 iemMemRollback(pVCpu);
15712 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15713}
15714
15715
15716/**
15717 * Interface for HM and EM to perform an APIC-write emulation.
15718 *
15719 * @returns Strict VBox status code.
15720 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15721 * @thread EMT(pVCpu)
15722 */
15723VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicWriteEmulation(PVMCPU pVCpu)
15724{
15725 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15726
15727 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15728 if (pVCpu->iem.s.cActiveMappings)
15729 iemMemRollback(pVCpu);
15730 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15731}
15732
15733
15734/**
15735 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15736 *
15737 * @returns Strict VBox status code.
15738 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15739 * @thread EMT(pVCpu)
15740 */
15741VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15742{
15743 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15744 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15745 if (pVCpu->iem.s.cActiveMappings)
15746 iemMemRollback(pVCpu);
15747 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15748}
15749
15750
15751/**
15752 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15753 *
15754 * @returns Strict VBox status code.
15755 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15756 * @param uVector The external interrupt vector.
15757 * @param fIntPending Whether the external interrupt is pending or
15758 * acknowledged in the interrupt controller.
15759 * @thread EMT(pVCpu)
15760 */
15761VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15762{
15763 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15764 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15765 if (pVCpu->iem.s.cActiveMappings)
15766 iemMemRollback(pVCpu);
15767 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15768}
15769
15770
15771/**
15772 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15773 *
15774 * @returns Strict VBox status code.
15775 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15776 * @param uVector The SIPI vector.
15777 * @thread EMT(pVCpu)
15778 */
15779VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15780{
15781 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15782 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15783 if (pVCpu->iem.s.cActiveMappings)
15784 iemMemRollback(pVCpu);
15785 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15786}
15787
15788
15789/**
15790 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15791 *
15792 * @returns Strict VBox status code.
15793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15794 * @thread EMT(pVCpu)
15795 */
15796VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15797{
15798 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15799 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15800 if (pVCpu->iem.s.cActiveMappings)
15801 iemMemRollback(pVCpu);
15802 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15803}
15804
15805
15806/**
15807 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15808 *
15809 * @returns Strict VBox status code.
15810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15813 *
15814 * @thread EMT(pVCpu)
15815 */
15816VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15817{
15818 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15819 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15820 if (pVCpu->iem.s.cActiveMappings)
15821 iemMemRollback(pVCpu);
15822 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15823}
15824
15825
15826/**
15827 * Interface for HM and EM to emulate the VMREAD instruction.
15828 *
15829 * @returns Strict VBox status code.
15830 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15831 * @param pExitInfo Pointer to the VM-exit information struct.
15832 * @thread EMT(pVCpu)
15833 */
15834VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15835{
15836 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15837 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15838 Assert(pExitInfo);
15839
15840 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15841
15842 VBOXSTRICTRC rcStrict;
15843 uint8_t const cbInstr = pExitInfo->cbInstr;
15844 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15845 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15846 {
15847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15848 {
15849 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15850 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15851 }
15852 else
15853 {
15854 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15855 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15856 }
15857 }
15858 else
15859 {
15860 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15861 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15862 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15863 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15864 }
15865 if (pVCpu->iem.s.cActiveMappings)
15866 iemMemRollback(pVCpu);
15867 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15868}
15869
15870
15871/**
15872 * Interface for HM and EM to emulate the VMWRITE instruction.
15873 *
15874 * @returns Strict VBox status code.
15875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15876 * @param pExitInfo Pointer to the VM-exit information struct.
15877 * @thread EMT(pVCpu)
15878 */
15879VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15880{
15881 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15883 Assert(pExitInfo);
15884
15885 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15886
15887 uint64_t u64Val;
15888 uint8_t iEffSeg;
15889 IEMMODE enmEffAddrMode;
15890 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15891 {
15892 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15893 iEffSeg = UINT8_MAX;
15894 enmEffAddrMode = (IEMMODE)UINT8_MAX;
15895 }
15896 else
15897 {
15898 u64Val = pExitInfo->GCPtrEffAddr;
15899 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15900 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15901 }
15902 uint8_t const cbInstr = pExitInfo->cbInstr;
15903 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15904 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15905 if (pVCpu->iem.s.cActiveMappings)
15906 iemMemRollback(pVCpu);
15907 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15908}
15909
15910
15911/**
15912 * Interface for HM and EM to emulate the VMPTRLD instruction.
15913 *
15914 * @returns Strict VBox status code.
15915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15916 * @param pExitInfo Pointer to the VM-exit information struct.
15917 * @thread EMT(pVCpu)
15918 */
15919VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15920{
15921 Assert(pExitInfo);
15922 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15923 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15924
15925 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15926
15927 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15928 uint8_t const cbInstr = pExitInfo->cbInstr;
15929 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15930 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15931 if (pVCpu->iem.s.cActiveMappings)
15932 iemMemRollback(pVCpu);
15933 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15934}
15935
15936
15937/**
15938 * Interface for HM and EM to emulate the VMPTRST instruction.
15939 *
15940 * @returns Strict VBox status code.
15941 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15942 * @param pExitInfo Pointer to the VM-exit information struct.
15943 * @thread EMT(pVCpu)
15944 */
15945VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15946{
15947 Assert(pExitInfo);
15948 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15949 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15950
15951 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15952
15953 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15954 uint8_t const cbInstr = pExitInfo->cbInstr;
15955 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15956 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15957 if (pVCpu->iem.s.cActiveMappings)
15958 iemMemRollback(pVCpu);
15959 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15960}
15961
15962
15963/**
15964 * Interface for HM and EM to emulate the VMCLEAR instruction.
15965 *
15966 * @returns Strict VBox status code.
15967 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15968 * @param pExitInfo Pointer to the VM-exit information struct.
15969 * @thread EMT(pVCpu)
15970 */
15971VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15972{
15973 Assert(pExitInfo);
15974 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15975 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15976
15977 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15978
15979 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15980 uint8_t const cbInstr = pExitInfo->cbInstr;
15981 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15982 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15983 if (pVCpu->iem.s.cActiveMappings)
15984 iemMemRollback(pVCpu);
15985 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15986}
15987
15988
15989/**
15990 * Interface for HM and EM to emulate the VMXON instruction.
15991 *
15992 * @returns Strict VBox status code.
15993 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15994 * @param pExitInfo Pointer to the VM-exit information struct.
15995 * @thread EMT(pVCpu)
15996 */
15997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15998{
15999 Assert(pExitInfo);
16000 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16001 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16002
16003 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16004
16005 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16006 uint8_t const cbInstr = pExitInfo->cbInstr;
16007 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16008 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16009 if (pVCpu->iem.s.cActiveMappings)
16010 iemMemRollback(pVCpu);
16011 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16012}
16013
16014
16015/**
16016 * Interface for HM and EM to emulate the VMXOFF instruction.
16017 *
16018 * @returns Strict VBox status code.
16019 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16020 * @param cbInstr The instruction length in bytes.
16021 * @thread EMT(pVCpu)
16022 */
16023VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16024{
16025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16026 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HM_VMX_MASK);
16027
16028 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16030 Assert(!pVCpu->iem.s.cActiveMappings);
16031 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16032}
16033
16034
16035/**
16036 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16037 *
16038 * @remarks The @a pvUser argument is currently unused.
16039 */
16040PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16041 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16042 PGMACCESSORIGIN enmOrigin, void *pvUser)
16043{
16044 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16045
16046 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
16047 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16048
16049#ifdef VBOX_STRICT
16050 RTGCPHYS const GCPhysApicBase = CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu));
16051 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16052 Assert(GCPhysApicBase == GCPhysAccessBase);
16053#endif
16054
16055 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16056 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16057
16058 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16059 if (RT_FAILURE(rcStrict))
16060 return rcStrict;
16061
16062 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16063 return VINF_SUCCESS;
16064}
16065
16066#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16067
16068#ifdef IN_RING3
16069
16070/**
16071 * Handles the unlikely and probably fatal merge cases.
16072 *
16073 * @returns Merged status code.
16074 * @param rcStrict Current EM status code.
16075 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16076 * with @a rcStrict.
16077 * @param iMemMap The memory mapping index. For error reporting only.
16078 * @param pVCpu The cross context virtual CPU structure of the calling
16079 * thread, for error reporting only.
16080 */
16081DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16082 unsigned iMemMap, PVMCPU pVCpu)
16083{
16084 if (RT_FAILURE_NP(rcStrict))
16085 return rcStrict;
16086
16087 if (RT_FAILURE_NP(rcStrictCommit))
16088 return rcStrictCommit;
16089
16090 if (rcStrict == rcStrictCommit)
16091 return rcStrictCommit;
16092
16093 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16094 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16095 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16096 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16097 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16098 return VERR_IOM_FF_STATUS_IPE;
16099}
16100
16101
16102/**
16103 * Helper for IOMR3ProcessForceFlag.
16104 *
16105 * @returns Merged status code.
16106 * @param rcStrict Current EM status code.
16107 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16108 * with @a rcStrict.
16109 * @param iMemMap The memory mapping index. For error reporting only.
16110 * @param pVCpu The cross context virtual CPU structure of the calling
16111 * thread, for error reporting only.
16112 */
16113DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16114{
16115 /* Simple. */
16116 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16117 return rcStrictCommit;
16118
16119 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16120 return rcStrict;
16121
16122 /* EM scheduling status codes. */
16123 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16124 && rcStrict <= VINF_EM_LAST))
16125 {
16126 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16127 && rcStrictCommit <= VINF_EM_LAST))
16128 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16129 }
16130
16131 /* Unlikely */
16132 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16133}
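
/* Rationale for the min() above: the EM scheduling status codes are ordered so
   that numerically smaller values carry higher priority, so when both statuses
   are scheduling requests the more urgent one is kept and eventually handed
   back to EM by IEMR3ProcessForceFlag. */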
16134
16135
16136/**
16137 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16138 *
16139 * @returns Merge between @a rcStrict and what the commit operation returned.
16140 * @param pVM The cross context VM structure.
16141 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16142 * @param rcStrict The status code returned by ring-0 or raw-mode.
16143 */
16144VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16145{
16146 /*
16147 * Reset the pending commit.
16148 */
16149 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16150 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16151 ("%#x %#x %#x\n",
16152 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16153 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16154
16155 /*
16156 * Commit the pending bounce buffers (usually just one).
16157 */
16158 unsigned cBufs = 0;
16159 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16160 while (iMemMap-- > 0)
16161 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16162 {
16163 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16164 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16165 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16166
16167 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16168 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16169 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16170
16171 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16172 {
16173 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16174 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16175 pbBuf,
16176 cbFirst,
16177 PGMACCESSORIGIN_IEM);
16178 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16179 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16180 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16181 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16182 }
16183
16184 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16185 {
16186 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16187 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16188 pbBuf + cbFirst,
16189 cbSecond,
16190 PGMACCESSORIGIN_IEM);
16191 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16192 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16193 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16194 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16195 }
16196 cBufs++;
16197 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16198 }
16199
16200 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16201 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16202 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16203 pVCpu->iem.s.cActiveMappings = 0;
16204 return rcStrict;
16205}
16206
16207#endif /* IN_RING3 */
16208