VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@74962

Last change on this file since 74962 was 74901, checked in by vboxsync on 2018-10-18

VMM/IEM: Nested VMX: bugref:9180 iemExecStatusCodeFiddling fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 633.6 KB
1/* $Id: IEMAll.cpp 74901 2018-10-18 06:05:41Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
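/** @remarks Illustrative sketch only (not part of the build) of the setjmp based
 *  status handling described above.  The names iemExampleJmpBuf, iemExampleRaiseJmp
 *  and iemExampleRunOne are made up for illustration; the real code uses the Jmp
 *  variants of the raise/memory helpers declared further down in this file.
 * @code
 *      #include <setjmp.h>
 *      static jmp_buf iemExampleJmpBuf;            // per-VCpu in practice
 *
 *      static void iemExampleRaiseJmp(int rc)      // raise helper: never returns
 *      {
 *          longjmp(iemExampleJmpBuf, rc);          // unwind straight back to the driver
 *      }
 *
 *      static int iemExampleRunOne(void)
 *      {
 *          int rc = setjmp(iemExampleJmpBuf);      // catch point for raised statuses
 *          if (rc == 0)
 *          {
 *              // ... decode and execute; helpers longjmp here on #GP, #PF, etc.
 *              rc = 0;                             // VINF_SUCCESS
 *          }
 *          return rc;                              // no per-call status checks needed
 *      }
 * @endcode
 */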
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
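/** @remarks Typical usage sketch: the macro supplies the default case of a switch
 *  over an enum whose values are all handled.  The helper below is hypothetical
 *  and only meant to illustrate the pattern.
 * @code
 *      static uint8_t iemExampleOpSizeInBytes(IEMMODE enmEffOpSize)
 *      {
 *          switch (enmEffOpSize)
 *          {
 *              case IEMMODE_16BIT: return 2;
 *              case IEMMODE_32BIT: return 4;
 *              case IEMMODE_64BIT: return 8;
 *              IEM_NOT_REACHED_DEFAULT_CASE_RET2(0);
 *          }
 *      }
 * @endcode
 */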
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
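/** @remarks Illustrative sketch of how the FNIEMOP_DEF / FNIEMOP_CALL pairs fit
 *  together.  The decoder names below are made up; the real decoders live in the
 *  instruction template files included elsewhere.
 * @code
 *      FNIEMOP_DEF(iemOp_Example)                          // hypothetical decoder
 *      {
 *          // ... fetch operands, queue the operation ...
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();            // defined above, returns for us
 *      }
 *
 *      FNIEMOPRM_DEF(iemOp_ExampleGroup)                   // hypothetical /r decoder
 *      {
 *          RT_NOREF(bRm);
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *      }
 *
 *      // Dispatching always goes through the matching FNIEMOP_CALL* macro:
 *      //      return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 *      //      return FNIEMOP_CALL_1(iemOp_ExampleGroup, bRm);
 * @endcode
 */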
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored if not 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387
388/**
389 * Check if the guest has entered VMX root operation.
390 */
391# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
392
393/**
394 * Check if the guest has entered VMX non-root operation.
395 */
396# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
397
398/**
399 * Check if the nested-guest has the given Pin-based VM-execution control set.
400 */
401# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
402 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
403
404/**
405 * Check if the nested-guest has the given Processor-based VM-execution control set.
406 */
407# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
408 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
409
410/**
411 * Check if the nested-guest has the given Secondary Processor-based VM-execution
412 * control set.
413 */
414# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
415 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
416
417/**
418 * Invokes the VMX VM-exit handler for an instruction intercept.
419 */
420# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
421 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
422
423/**
424 * Invokes the VMX VM-exit handler for an instruction intercept where the
425 * instruction provides additional VM-exit information.
426 */
427# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
428 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
429
430/**
431 * Invokes the VMX VM-exit handler for a task switch.
432 */
433# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
434 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
435
436/**
437 * Invokes the VMX VM-exit handler for MWAIT.
438 */
439# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
440 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
441
442/**
443 * Invokes the VMX VM-exit handler for triple faults.
444 */
445# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
446 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
447
448#else
449# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
450# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
451# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
452# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
453# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
454# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
458# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
459
460#endif
461
462#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
463/**
464 * Check if an SVM control/instruction intercept is set.
465 */
466# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
467 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
468
469/**
470 * Check if an SVM read CRx intercept is set.
471 */
472# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
473 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
474
475/**
476 * Check if an SVM write CRx intercept is set.
477 */
478# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
479 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
480
481/**
482 * Check if an SVM read DRx intercept is set.
483 */
484# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
485 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
486
487/**
488 * Check if an SVM write DRx intercept is set.
489 */
490# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
491 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
492
493/**
494 * Check if an SVM exception intercept is set.
495 */
496# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
497 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
498
499/**
500 * Invokes the SVM \#VMEXIT handler for the nested-guest.
501 */
502# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
503 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
504
505/**
506 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
507 * corresponding decode assist information.
508 */
509# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
510 do \
511 { \
512 uint64_t uExitInfo1; \
513 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
514 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
515 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
516 else \
517 uExitInfo1 = 0; \
518 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
519 } while (0)
520
521/** Checks and handles SVM nested-guest instruction intercepts and updates
522 * the NRIP if needed.
523 */
524# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
525 do \
526 { \
527 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
528 { \
529 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
530 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
531 } \
532 } while (0)
533
534/** Checks and handles SVM nested-guest CR0 read intercept. */
535# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
536 do \
537 { \
538 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
539 { /* probably likely */ } \
540 else \
541 { \
542 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
543 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
544 } \
545 } while (0)
546
547/**
548 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
549 */
550# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
551 do { \
552 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
553 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
554 } while (0)
555
556#else
557# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
558# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
559# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
560# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
561# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
562# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
563# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
564# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
565# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
566# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
567# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
568
569#endif
570
571
572/*********************************************************************************************************************************
573* Global Variables *
574*********************************************************************************************************************************/
575extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
576
577
578/** Function table for the ADD instruction. */
579IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
580{
581 iemAImpl_add_u8, iemAImpl_add_u8_locked,
582 iemAImpl_add_u16, iemAImpl_add_u16_locked,
583 iemAImpl_add_u32, iemAImpl_add_u32_locked,
584 iemAImpl_add_u64, iemAImpl_add_u64_locked
585};
586
587/** Function table for the ADC instruction. */
588IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
589{
590 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
591 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
592 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
593 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
594};
595
596/** Function table for the SUB instruction. */
597IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
598{
599 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
600 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
601 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
602 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
603};
604
605/** Function table for the SBB instruction. */
606IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
607{
608 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
609 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
610 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
611 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
612};
613
614/** Function table for the OR instruction. */
615IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
616{
617 iemAImpl_or_u8, iemAImpl_or_u8_locked,
618 iemAImpl_or_u16, iemAImpl_or_u16_locked,
619 iemAImpl_or_u32, iemAImpl_or_u32_locked,
620 iemAImpl_or_u64, iemAImpl_or_u64_locked
621};
622
623/** Function table for the XOR instruction. */
624IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
625{
626 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
627 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
628 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
629 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
630};
631
632/** Function table for the AND instruction. */
633IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
634{
635 iemAImpl_and_u8, iemAImpl_and_u8_locked,
636 iemAImpl_and_u16, iemAImpl_and_u16_locked,
637 iemAImpl_and_u32, iemAImpl_and_u32_locked,
638 iemAImpl_and_u64, iemAImpl_and_u64_locked
639};
640
641/** Function table for the CMP instruction.
642 * @remarks Making operand order ASSUMPTIONS.
643 */
644IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
645{
646 iemAImpl_cmp_u8, NULL,
647 iemAImpl_cmp_u16, NULL,
648 iemAImpl_cmp_u32, NULL,
649 iemAImpl_cmp_u64, NULL
650};
651
652/** Function table for the TEST instruction.
653 * @remarks Making operand order ASSUMPTIONS.
654 */
655IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
656{
657 iemAImpl_test_u8, NULL,
658 iemAImpl_test_u16, NULL,
659 iemAImpl_test_u32, NULL,
660 iemAImpl_test_u64, NULL
661};
662
663/** Function table for the BT instruction. */
664IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
665{
666 NULL, NULL,
667 iemAImpl_bt_u16, NULL,
668 iemAImpl_bt_u32, NULL,
669 iemAImpl_bt_u64, NULL
670};
671
672/** Function table for the BTC instruction. */
673IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
674{
675 NULL, NULL,
676 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
677 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
678 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
679};
680
681/** Function table for the BTR instruction. */
682IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
683{
684 NULL, NULL,
685 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
686 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
687 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
688};
689
690/** Function table for the BTS instruction. */
691IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
692{
693 NULL, NULL,
694 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
695 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
696 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
697};
698
699/** Function table for the BSF instruction. */
700IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
701{
702 NULL, NULL,
703 iemAImpl_bsf_u16, NULL,
704 iemAImpl_bsf_u32, NULL,
705 iemAImpl_bsf_u64, NULL
706};
707
708/** Function table for the BSR instruction. */
709IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
710{
711 NULL, NULL,
712 iemAImpl_bsr_u16, NULL,
713 iemAImpl_bsr_u32, NULL,
714 iemAImpl_bsr_u64, NULL
715};
716
717/** Function table for the IMUL instruction. */
718IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
719{
720 NULL, NULL,
721 iemAImpl_imul_two_u16, NULL,
722 iemAImpl_imul_two_u32, NULL,
723 iemAImpl_imul_two_u64, NULL
724};
725
726/** Group 1 /r lookup table. */
727IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
728{
729 &g_iemAImpl_add,
730 &g_iemAImpl_or,
731 &g_iemAImpl_adc,
732 &g_iemAImpl_sbb,
733 &g_iemAImpl_and,
734 &g_iemAImpl_sub,
735 &g_iemAImpl_xor,
736 &g_iemAImpl_cmp
737};
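/** @remarks Illustrative sketch (hypothetical variable names): a group-1 decoder
 *  selects the implementation table with the ModRM.reg field and then picks the
 *  normal or locked worker for the effective operand size.  The member layout is
 *  an assumption here; see IEMOPBINSIZES in IEMInternal.h for the real structure.
 * @code
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   // ModRM.reg selects ADD..CMP
 *      // e.g. use the 32-bit worker, or the locked variant if a LOCK prefix is present.
 * @endcode
 */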
738
739/** Function table for the INC instruction. */
740IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
741{
742 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
743 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
744 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
745 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
746};
747
748/** Function table for the DEC instruction. */
749IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
750{
751 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
752 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
753 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
754 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
755};
756
757/** Function table for the NEG instruction. */
758IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
759{
760 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
761 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
762 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
763 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
764};
765
766/** Function table for the NOT instruction. */
767IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
768{
769 iemAImpl_not_u8, iemAImpl_not_u8_locked,
770 iemAImpl_not_u16, iemAImpl_not_u16_locked,
771 iemAImpl_not_u32, iemAImpl_not_u32_locked,
772 iemAImpl_not_u64, iemAImpl_not_u64_locked
773};
774
775
776/** Function table for the ROL instruction. */
777IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
778{
779 iemAImpl_rol_u8,
780 iemAImpl_rol_u16,
781 iemAImpl_rol_u32,
782 iemAImpl_rol_u64
783};
784
785/** Function table for the ROR instruction. */
786IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
787{
788 iemAImpl_ror_u8,
789 iemAImpl_ror_u16,
790 iemAImpl_ror_u32,
791 iemAImpl_ror_u64
792};
793
794/** Function table for the RCL instruction. */
795IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
796{
797 iemAImpl_rcl_u8,
798 iemAImpl_rcl_u16,
799 iemAImpl_rcl_u32,
800 iemAImpl_rcl_u64
801};
802
803/** Function table for the RCR instruction. */
804IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
805{
806 iemAImpl_rcr_u8,
807 iemAImpl_rcr_u16,
808 iemAImpl_rcr_u32,
809 iemAImpl_rcr_u64
810};
811
812/** Function table for the SHL instruction. */
813IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
814{
815 iemAImpl_shl_u8,
816 iemAImpl_shl_u16,
817 iemAImpl_shl_u32,
818 iemAImpl_shl_u64
819};
820
821/** Function table for the SHR instruction. */
822IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
823{
824 iemAImpl_shr_u8,
825 iemAImpl_shr_u16,
826 iemAImpl_shr_u32,
827 iemAImpl_shr_u64
828};
829
830/** Function table for the SAR instruction. */
831IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
832{
833 iemAImpl_sar_u8,
834 iemAImpl_sar_u16,
835 iemAImpl_sar_u32,
836 iemAImpl_sar_u64
837};
838
839
840/** Function table for the MUL instruction. */
841IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
842{
843 iemAImpl_mul_u8,
844 iemAImpl_mul_u16,
845 iemAImpl_mul_u32,
846 iemAImpl_mul_u64
847};
848
849/** Function table for the IMUL instruction working implicitly on rAX. */
850IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
851{
852 iemAImpl_imul_u8,
853 iemAImpl_imul_u16,
854 iemAImpl_imul_u32,
855 iemAImpl_imul_u64
856};
857
858/** Function table for the DIV instruction. */
859IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
860{
861 iemAImpl_div_u8,
862 iemAImpl_div_u16,
863 iemAImpl_div_u32,
864 iemAImpl_div_u64
865};
866
867/** Function table for the IDIV instruction. */
868IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
869{
870 iemAImpl_idiv_u8,
871 iemAImpl_idiv_u16,
872 iemAImpl_idiv_u32,
873 iemAImpl_idiv_u64
874};
875
876/** Function table for the SHLD instruction */
877IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
878{
879 iemAImpl_shld_u16,
880 iemAImpl_shld_u32,
881 iemAImpl_shld_u64,
882};
883
884/** Function table for the SHRD instruction */
885IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
886{
887 iemAImpl_shrd_u16,
888 iemAImpl_shrd_u32,
889 iemAImpl_shrd_u64,
890};
891
892
893/** Function table for the PUNPCKLBW instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
895/** Function table for the PUNPCKLWD instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
897/** Function table for the PUNPCKLDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
899/** Function table for the PUNPCKLQDQ instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
901
902/** Function table for the PUNPCKHBW instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
904/** Function table for the PUNPCKHWD instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
906/** Function table for the PUNPCKHDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
908/** Function table for the PUNPCKHQDQ instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
910
911/** Function table for the PXOR instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
913/** Function table for the PCMPEQB instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
915/** Function table for the PCMPEQW instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
917/** Function table for the PCMPEQD instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
919
920
921#if defined(IEM_LOG_MEMORY_WRITES)
922/** What IEM just wrote. */
923uint8_t g_abIemWrote[256];
924/** How much IEM just wrote. */
925size_t g_cbIemWrote;
926#endif
927
928
929/*********************************************************************************************************************************
930* Internal Functions *
931*********************************************************************************************************************************/
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
935IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
936/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
937IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
940IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
943IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
946IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
947IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
948IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
949#ifdef IEM_WITH_SETJMP
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
955#endif
956
957IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
958IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
970IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
971IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
972IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
973IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
974
975#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
978 uint8_t cbInstr);
979IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr,
987 uint64_t uCr2);
988#endif
989
990
991/**
992 * Sets the pass up status.
993 *
994 * @returns VINF_SUCCESS.
995 * @param pVCpu The cross context virtual CPU structure of the
996 * calling thread.
997 * @param rcPassUp The pass up status. Must be informational.
998 * VINF_SUCCESS is not allowed.
999 */
1000IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1001{
1002 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1003
1004 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1005 if (rcOldPassUp == VINF_SUCCESS)
1006 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1007 /* If both are EM scheduling codes, use EM priority rules. */
1008 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1009 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1010 {
1011 if (rcPassUp < rcOldPassUp)
1012 {
1013 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1014 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1015 }
1016 else
1017 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1018 }
1019 /* Override EM scheduling with specific status code. */
1020 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1021 {
1022 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1023 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1024 }
1025 /* Don't override specific status code, first come first served. */
1026 else
1027 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1028 return VINF_SUCCESS;
1029}
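/** @remarks Typical call-site pattern (the same shape is used for the PGMPhysRead
 *  call further down in this file): an informational status from a physical access
 *  is folded into the pass-up status, and the access itself is treated as success.
 * @code
 *      if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);     // now VINF_SUCCESS
 * @endcode
 */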
1030
1031
1032/**
1033 * Calculates the CPU mode.
1034 *
1035 * This is mainly for updating IEMCPU::enmCpuMode.
1036 *
1037 * @returns CPU mode.
1038 * @param pVCpu The cross context virtual CPU structure of the
1039 * calling thread.
1040 */
1041DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1042{
1043 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1044 return IEMMODE_64BIT;
1045 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1046 return IEMMODE_32BIT;
1047 return IEMMODE_16BIT;
1048}
1049
1050
1051/**
1052 * Initializes the execution state.
1053 *
1054 * @param pVCpu The cross context virtual CPU structure of the
1055 * calling thread.
1056 * @param fBypassHandlers Whether to bypass access handlers.
1057 *
1058 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1059 * side-effects in strict builds.
1060 */
1061DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1062{
1063 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1065
1066#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1075#endif
1076
1077#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1078 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1079#endif
1080 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1081 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1082#ifdef VBOX_STRICT
1083 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1084 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1085 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1086 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1087 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1088 pVCpu->iem.s.uRexReg = 127;
1089 pVCpu->iem.s.uRexB = 127;
1090 pVCpu->iem.s.offModRm = 127;
1091 pVCpu->iem.s.uRexIndex = 127;
1092 pVCpu->iem.s.iEffSeg = 127;
1093 pVCpu->iem.s.idxPrefix = 127;
1094 pVCpu->iem.s.uVex3rdReg = 127;
1095 pVCpu->iem.s.uVexLength = 127;
1096 pVCpu->iem.s.fEvexStuff = 127;
1097 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1098# ifdef IEM_WITH_CODE_TLB
1099 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1100 pVCpu->iem.s.pbInstrBuf = NULL;
1101 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1102 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1103 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1104 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1105# else
1106 pVCpu->iem.s.offOpcode = 127;
1107 pVCpu->iem.s.cbOpcode = 127;
1108# endif
1109#endif
1110
1111 pVCpu->iem.s.cActiveMappings = 0;
1112 pVCpu->iem.s.iNextMapping = 0;
1113 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1114 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1115#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1116 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1117 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1118 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1119 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1120 if (!pVCpu->iem.s.fInPatchCode)
1121 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1122#endif
1123}
1124
1125#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1126/**
1127 * Performs a minimal reinitialization of the execution state.
1128 *
1129 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1130 * 'world-switch' type operations on the CPU. Currently only nested
1131 * hardware-virtualization uses it.
1132 *
1133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1134 */
1135IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1136{
1137 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1138 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1139
1140 pVCpu->iem.s.uCpl = uCpl;
1141 pVCpu->iem.s.enmCpuMode = enmMode;
1142 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1143 pVCpu->iem.s.enmEffAddrMode = enmMode;
1144 if (enmMode != IEMMODE_64BIT)
1145 {
1146 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1147 pVCpu->iem.s.enmEffOpSize = enmMode;
1148 }
1149 else
1150 {
1151 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1152 pVCpu->iem.s.enmEffOpSize = enmMode;
1153 }
1154 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1155#ifndef IEM_WITH_CODE_TLB
1156 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1157 pVCpu->iem.s.offOpcode = 0;
1158 pVCpu->iem.s.cbOpcode = 0;
1159#endif
1160 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1161}
1162#endif
1163
1164/**
1165 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1166 *
1167 * @param pVCpu The cross context virtual CPU structure of the
1168 * calling thread.
1169 */
1170DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1171{
1172 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1173#ifdef VBOX_STRICT
1174# ifdef IEM_WITH_CODE_TLB
1175 NOREF(pVCpu);
1176# else
1177 pVCpu->iem.s.cbOpcode = 0;
1178# endif
1179#else
1180 NOREF(pVCpu);
1181#endif
1182}
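/** @remarks The iemInitExec / iemUninitExec pair is meant to bracket an execution
 *  attempt; rough sketch (the body is a placeholder, not real code):
 * @code
 *      iemInitExec(pVCpu, false);          // fBypassHandlers = false
 *      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *      // ... decode and execute one or more instructions here ...
 *      iemUninitExec(pVCpu);               // undo the strict-build poisoning
 * @endcode
 */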
1183
1184
1185/**
1186 * Initializes the decoder state.
1187 *
1188 * iemReInitDecoder is mostly a copy of this function.
1189 *
1190 * @param pVCpu The cross context virtual CPU structure of the
1191 * calling thread.
1192 * @param fBypassHandlers Whether to bypass access handlers.
1193 */
1194DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1195{
1196 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1197 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1198
1199#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1208#endif
1209
1210#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1211 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1212#endif
1213 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1214 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1215 pVCpu->iem.s.enmCpuMode = enmMode;
1216 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1217 pVCpu->iem.s.enmEffAddrMode = enmMode;
1218 if (enmMode != IEMMODE_64BIT)
1219 {
1220 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1221 pVCpu->iem.s.enmEffOpSize = enmMode;
1222 }
1223 else
1224 {
1225 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1226 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1227 }
1228 pVCpu->iem.s.fPrefixes = 0;
1229 pVCpu->iem.s.uRexReg = 0;
1230 pVCpu->iem.s.uRexB = 0;
1231 pVCpu->iem.s.uRexIndex = 0;
1232 pVCpu->iem.s.idxPrefix = 0;
1233 pVCpu->iem.s.uVex3rdReg = 0;
1234 pVCpu->iem.s.uVexLength = 0;
1235 pVCpu->iem.s.fEvexStuff = 0;
1236 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1237#ifdef IEM_WITH_CODE_TLB
1238 pVCpu->iem.s.pbInstrBuf = NULL;
1239 pVCpu->iem.s.offInstrNextByte = 0;
1240 pVCpu->iem.s.offCurInstrStart = 0;
1241# ifdef VBOX_STRICT
1242 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1243 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1244 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1245# endif
1246#else
1247 pVCpu->iem.s.offOpcode = 0;
1248 pVCpu->iem.s.cbOpcode = 0;
1249#endif
1250 pVCpu->iem.s.offModRm = 0;
1251 pVCpu->iem.s.cActiveMappings = 0;
1252 pVCpu->iem.s.iNextMapping = 0;
1253 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1254 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1255#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1256 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1257 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1258 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1259 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1260 if (!pVCpu->iem.s.fInPatchCode)
1261 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1262#endif
1263
1264#ifdef DBGFTRACE_ENABLED
1265 switch (enmMode)
1266 {
1267 case IEMMODE_64BIT:
1268 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1269 break;
1270 case IEMMODE_32BIT:
1271 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1272 break;
1273 case IEMMODE_16BIT:
1274 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1275 break;
1276 }
1277#endif
1278}
1279
1280
1281/**
1282 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1283 *
1284 * This is mostly a copy of iemInitDecoder.
1285 *
1286 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1287 */
1288DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1289{
1290 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1291
1292#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1301#endif
1302
1303 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1304 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1305 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1306 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1307 pVCpu->iem.s.enmEffAddrMode = enmMode;
1308 if (enmMode != IEMMODE_64BIT)
1309 {
1310 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1311 pVCpu->iem.s.enmEffOpSize = enmMode;
1312 }
1313 else
1314 {
1315 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1316 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1317 }
1318 pVCpu->iem.s.fPrefixes = 0;
1319 pVCpu->iem.s.uRexReg = 0;
1320 pVCpu->iem.s.uRexB = 0;
1321 pVCpu->iem.s.uRexIndex = 0;
1322 pVCpu->iem.s.idxPrefix = 0;
1323 pVCpu->iem.s.uVex3rdReg = 0;
1324 pVCpu->iem.s.uVexLength = 0;
1325 pVCpu->iem.s.fEvexStuff = 0;
1326 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1327#ifdef IEM_WITH_CODE_TLB
1328 if (pVCpu->iem.s.pbInstrBuf)
1329 {
1330 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1331 - pVCpu->iem.s.uInstrBufPc;
1332 if (off < pVCpu->iem.s.cbInstrBufTotal)
1333 {
1334 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1335 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1336 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1337 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1338 else
1339 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1340 }
1341 else
1342 {
1343 pVCpu->iem.s.pbInstrBuf = NULL;
1344 pVCpu->iem.s.offInstrNextByte = 0;
1345 pVCpu->iem.s.offCurInstrStart = 0;
1346 pVCpu->iem.s.cbInstrBuf = 0;
1347 pVCpu->iem.s.cbInstrBufTotal = 0;
1348 }
1349 }
1350 else
1351 {
1352 pVCpu->iem.s.offInstrNextByte = 0;
1353 pVCpu->iem.s.offCurInstrStart = 0;
1354 pVCpu->iem.s.cbInstrBuf = 0;
1355 pVCpu->iem.s.cbInstrBufTotal = 0;
1356 }
1357#else
1358 pVCpu->iem.s.cbOpcode = 0;
1359 pVCpu->iem.s.offOpcode = 0;
1360#endif
1361 pVCpu->iem.s.offModRm = 0;
1362 Assert(pVCpu->iem.s.cActiveMappings == 0);
1363 pVCpu->iem.s.iNextMapping = 0;
1364 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1365 Assert(pVCpu->iem.s.fBypassHandlers == false);
1366#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1367 if (!pVCpu->iem.s.fInPatchCode)
1368 { /* likely */ }
1369 else
1370 {
1371 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1372 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1373 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1374 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1375 if (!pVCpu->iem.s.fInPatchCode)
1376 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1377 }
1378#endif
1379
1380#ifdef DBGFTRACE_ENABLED
1381 switch (enmMode)
1382 {
1383 case IEMMODE_64BIT:
1384 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1385 break;
1386 case IEMMODE_32BIT:
1387 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1388 break;
1389 case IEMMODE_16BIT:
1390 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1391 break;
1392 }
1393#endif
1394}
1395
1396
1397
1398/**
1399 * Prefetch opcodes the first time when starting execution.
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pVCpu The cross context virtual CPU structure of the
1403 * calling thread.
1404 * @param fBypassHandlers Whether to bypass access handlers.
1405 */
1406IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1407{
1408 iemInitDecoder(pVCpu, fBypassHandlers);
1409
1410#ifdef IEM_WITH_CODE_TLB
1411 /** @todo Do ITLB lookup here. */
1412
1413#else /* !IEM_WITH_CODE_TLB */
1414
1415 /*
1416 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1417 *
1418 * First translate CS:rIP to a physical address.
1419 */
1420 uint32_t cbToTryRead;
1421 RTGCPTR GCPtrPC;
1422 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1423 {
1424 cbToTryRead = PAGE_SIZE;
1425 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1426 if (IEM_IS_CANONICAL(GCPtrPC))
1427 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1428 else
1429 return iemRaiseGeneralProtectionFault0(pVCpu);
1430 }
1431 else
1432 {
1433 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1434 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1435 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1436 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1437 else
1438 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1439 if (cbToTryRead) { /* likely */ }
1440 else /* overflowed */
1441 {
1442 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1443 cbToTryRead = UINT32_MAX;
1444 }
1445 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1446 Assert(GCPtrPC <= UINT32_MAX);
1447 }
1448
1449# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1450 /* Allow interpretation of patch manager code blocks since they can for
1451 instance throw #PFs for perfectly good reasons. */
1452 if (pVCpu->iem.s.fInPatchCode)
1453 {
1454 size_t cbRead = 0;
1455 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1456 AssertRCReturn(rc, rc);
1457 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1458 return VINF_SUCCESS;
1459 }
1460# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1461
1462 RTGCPHYS GCPhys;
1463 uint64_t fFlags;
1464 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1465 if (RT_SUCCESS(rc)) { /* probable */ }
1466 else
1467 {
1468 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1469 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1470 }
1471 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1472 else
1473 {
1474 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1475 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1476 }
1477 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1478 else
1479 {
1480 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1481 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1482 }
1483 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1484 /** @todo Check reserved bits and such stuff. PGM is better at doing
1485 * that, so do it when implementing the guest virtual address
1486 * TLB... */
1487
1488 /*
1489 * Read the bytes at this address.
1490 */
1491 PVM pVM = pVCpu->CTX_SUFF(pVM);
1492# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1493 size_t cbActual;
1494 if ( PATMIsEnabled(pVM)
1495 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1496 {
1497 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1498 Assert(cbActual > 0);
1499 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1500 }
1501 else
1502# endif
1503 {
1504 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1505 if (cbToTryRead > cbLeftOnPage)
1506 cbToTryRead = cbLeftOnPage;
1507 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1508 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1509
1510 if (!pVCpu->iem.s.fBypassHandlers)
1511 {
1512 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1513 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1514 { /* likely */ }
1515 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1516 {
1517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1518                     GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1519 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1520 }
1521 else
1522 {
1523 Log((RT_SUCCESS(rcStrict)
1524 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1525 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1526                     GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1527 return rcStrict;
1528 }
1529 }
1530 else
1531 {
1532 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1533 if (RT_SUCCESS(rc))
1534 { /* likely */ }
1535 else
1536 {
1537 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1538                     GCPtrPC, GCPhys, cbToTryRead, rc));
1539 return rc;
1540 }
1541 }
1542 pVCpu->iem.s.cbOpcode = cbToTryRead;
1543 }
1544#endif /* !IEM_WITH_CODE_TLB */
1545 return VINF_SUCCESS;
1546}
1547
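/*
 * Illustrative sketch (kept out of the build): how a caller might drive the decoder
 * init + prefetch above. The wrapper name is made up for illustration; only the call
 * itself and the strict status handling follow what this file does elsewhere.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodeAndRun(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false /*fBypassHandlers*/);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... decode and execute one instruction here ... */
    }
    return rcStrict;
}
#endif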
1548
1549/**
1550 * Invalidates the IEM TLBs.
1551 *
1552 * This is called internally as well as by PGM when moving GC mappings.
1553 *
1555 * @param pVCpu The cross context virtual CPU structure of the calling
1556 * thread.
1557 * @param fVmm Set when PGM calls us with a remapping.
1558 */
1559VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1560{
1561#ifdef IEM_WITH_CODE_TLB
1562 pVCpu->iem.s.cbInstrBufTotal = 0;
1563 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1564 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1565 { /* very likely */ }
1566 else
1567 {
1568 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1569 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1570 while (i-- > 0)
1571 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1572 }
1573#endif
1574
1575#ifdef IEM_WITH_DATA_TLB
1576 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1577 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1578 { /* very likely */ }
1579 else
1580 {
1581 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1582 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1583 while (i-- > 0)
1584 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1585 }
1586#endif
1587 NOREF(pVCpu); NOREF(fVmm);
1588}
1589
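/*
 * Why the revision bump above suffices: a lookup only treats an entry as valid when its
 * tag equals the page number OR'ed with the *current* revision (see the code TLB lookup
 * in iemOpcodeFetchBytesJmp below), so stepping the revision lazily invalidates every
 * entry without clearing the arrays. Sketch of the test, with illustrative locals:
 *
 *     uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *     bool const     fHit  = pTlbe->uTag == uTag; // entries tagged with an old revision never match
 */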
1590
1591/**
1592 * Invalidates a page in the TLBs.
1593 *
1594 * @param pVCpu The cross context virtual CPU structure of the calling
1595 * thread.
1596 * @param GCPtr The address of the page to invalidate
1597 */
1598VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1599{
1600#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1601 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1602 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1603 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1604 uintptr_t idx = (uint8_t)GCPtr;
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1610 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1611 pVCpu->iem.s.cbInstrBufTotal = 0;
1612 }
1613# endif
1614
1615# ifdef IEM_WITH_DATA_TLB
1616 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1617 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1618# endif
1619#else
1620 NOREF(pVCpu); NOREF(GCPtr);
1621#endif
1622}
1623
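/*
 * Usage note: this is presumably what gets called for single-page invalidations such as
 * a guest INVLPG, passing the linear address of the page, e.g.:
 *
 *     IEMTlbInvalidatePage(pVCpu, GCPtrPage);   // GCPtrPage is an illustrative local
 *
 * Since only the low 8 bits of the page number select the slot, at most one code and one
 * data entry are cleared per call.
 */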
1624
1625/**
1626 * Invalidates the host physical aspects of the IEM TLBs.
1627 *
1628 * This is called internally as well as by PGM when moving GC mappings.
1629 *
1630 * @param pVCpu The cross context virtual CPU structure of the calling
1631 * thread.
1632 */
1633VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1634{
1635#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1636    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1637
1638# ifdef IEM_WITH_CODE_TLB
1639 pVCpu->iem.s.cbInstrBufTotal = 0;
1640# endif
1641 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1642 if (uTlbPhysRev != 0)
1643 {
1644 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1645 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1646 }
1647 else
1648 {
1649 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1650 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1651
1652 unsigned i;
1653# ifdef IEM_WITH_CODE_TLB
1654 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1655 while (i-- > 0)
1656 {
1657 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1658 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1659 }
1660# endif
1661# ifdef IEM_WITH_DATA_TLB
1662 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1663 while (i-- > 0)
1664 {
1665 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1666 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1667 }
1668# endif
1669 }
1670#else
1671 NOREF(pVCpu);
1672#endif
1673}
1674
1675
1676/**
1677 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1678 *
1679 * This is called internally as well as by PGM when moving GC mappings.
1680 *
1681 * @param pVM The cross context VM structure.
1682 *
1683 * @remarks Caller holds the PGM lock.
1684 */
1685VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1686{
1687 RT_NOREF_PV(pVM);
1688}
1689
1690#ifdef IEM_WITH_CODE_TLB
1691
1692/**
1693 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1694 * longjmp'ing on failure.
1695 *
1696 * We end up here for a number of reasons:
1697 * - pbInstrBuf isn't yet initialized.
1698 *      - Advancing beyond the buffer boundary (e.g. cross page).
1699 * - Advancing beyond the CS segment limit.
1700 * - Fetching from non-mappable page (e.g. MMIO).
1701 *
1702 * @param pVCpu The cross context virtual CPU structure of the
1703 * calling thread.
1704 * @param pvDst Where to return the bytes.
1705 * @param cbDst Number of bytes to read.
1706 *
1707 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1708 */
1709IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1710{
1711#ifdef IN_RING3
1712 for (;;)
1713 {
1714 Assert(cbDst <= 8);
1715 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1716
1717 /*
1718 * We might have a partial buffer match, deal with that first to make the
1719 * rest simpler. This is the first part of the cross page/buffer case.
1720 */
1721 if (pVCpu->iem.s.pbInstrBuf != NULL)
1722 {
1723 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1724 {
1725 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1726 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1727 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1728
1729 cbDst -= cbCopy;
1730 pvDst = (uint8_t *)pvDst + cbCopy;
1731 offBuf += cbCopy;
1732 pVCpu->iem.s.offInstrNextByte += offBuf;
1733 }
1734 }
1735
1736 /*
1737 * Check segment limit, figuring how much we're allowed to access at this point.
1738 *
1739 * We will fault immediately if RIP is past the segment limit / in non-canonical
1740 * territory. If we do continue, there are one or more bytes to read before we
1741 * end up in trouble and we need to do that first before faulting.
1742 */
1743 RTGCPTR GCPtrFirst;
1744 uint32_t cbMaxRead;
1745 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1746 {
1747 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1748 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1749 { /* likely */ }
1750 else
1751 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1752 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1753 }
1754 else
1755 {
1756 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1757 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1758 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1759 { /* likely */ }
1760 else
1761 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1762 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1763 if (cbMaxRead != 0)
1764 { /* likely */ }
1765 else
1766 {
1767 /* Overflowed because address is 0 and limit is max. */
1768 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1769 cbMaxRead = X86_PAGE_SIZE;
1770 }
1771 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1772 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1773 if (cbMaxRead2 < cbMaxRead)
1774 cbMaxRead = cbMaxRead2;
1775 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1776 }
1777
1778 /*
1779 * Get the TLB entry for this piece of code.
1780 */
1781 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1782 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1783 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1784 if (pTlbe->uTag == uTag)
1785 {
1786 /* likely when executing lots of code, otherwise unlikely */
1787# ifdef VBOX_WITH_STATISTICS
1788 pVCpu->iem.s.CodeTlb.cTlbHits++;
1789# endif
1790 }
1791 else
1792 {
1793 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1794# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1795 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1796 {
1797 pTlbe->uTag = uTag;
1798 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1799 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1800 pTlbe->GCPhys = NIL_RTGCPHYS;
1801 pTlbe->pbMappingR3 = NULL;
1802 }
1803 else
1804# endif
1805 {
1806 RTGCPHYS GCPhys;
1807 uint64_t fFlags;
1808 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1809 if (RT_FAILURE(rc))
1810 {
1811 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1812 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1813 }
1814
1815 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1816 pTlbe->uTag = uTag;
1817 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1818 pTlbe->GCPhys = GCPhys;
1819 pTlbe->pbMappingR3 = NULL;
1820 }
1821 }
1822
1823 /*
1824 * Check TLB page table level access flags.
1825 */
1826 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1827 {
1828 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1829 {
1830 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1831 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1832 }
1833 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1834 {
1835 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1836 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1837 }
1838 }
1839
1840# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1841 /*
1842 * Allow interpretation of patch manager code blocks since they can for
1843 * instance throw #PFs for perfectly good reasons.
1844 */
1845 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1846    { /* likely */ }
1847 else
1848 {
1849        /** @todo This could be optimized a little in ring-3 if we liked. */
1850 size_t cbRead = 0;
1851 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1852 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1853 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1854 return;
1855 }
1856# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1857
1858 /*
1859 * Look up the physical page info if necessary.
1860 */
1861 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1862 { /* not necessary */ }
1863 else
1864 {
1865 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1866 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1867 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1868 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1869 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1870 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1871 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1872 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1873 }
1874
1875# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1876 /*
1877 * Try do a direct read using the pbMappingR3 pointer.
1878 */
1879 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1880 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1881 {
1882 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1883 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1884 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1885 {
1886 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1887 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1888 }
1889 else
1890 {
1891 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1892 Assert(cbInstr < cbMaxRead);
1893 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1894 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1895 }
1896 if (cbDst <= cbMaxRead)
1897 {
1898 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1899 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1900 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1901 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1902 return;
1903 }
1904 pVCpu->iem.s.pbInstrBuf = NULL;
1905
1906 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1907 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1908 }
1909 else
1910# endif
1911#if 0
1912 /*
1913     * If there is no special read handling, we can read a bit more and
1914 * put it in the prefetch buffer.
1915 */
1916 if ( cbDst < cbMaxRead
1917 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1918 {
1919 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1920 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1921 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1922 { /* likely */ }
1923 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1924 {
1925 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1926 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1927 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1928            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1929 }
1930 else
1931 {
1932 Log((RT_SUCCESS(rcStrict)
1933 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1934 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1935 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1936 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1937 }
1938 }
1939 /*
1940 * Special read handling, so only read exactly what's needed.
1941 * This is a highly unlikely scenario.
1942 */
1943 else
1944#endif
1945 {
1946 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1947 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1948 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1949 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1950 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1951 { /* likely */ }
1952 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1953 {
1954 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1955                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1956 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1957 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1958 }
1959 else
1960 {
1961 Log((RT_SUCCESS(rcStrict)
1962 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1963 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1964                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1965 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1966 }
1967 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1968 if (cbToRead == cbDst)
1969 return;
1970 }
1971
1972 /*
1973 * More to read, loop.
1974 */
1975 cbDst -= cbMaxRead;
1976 pvDst = (uint8_t *)pvDst + cbMaxRead;
1977 }
1978#else
1979 RT_NOREF(pvDst, cbDst);
1980 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1981#endif
1982}
1983
1984#else
1985
1986/**
1987 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1988 * exception if it fails.
1989 *
1990 * @returns Strict VBox status code.
1991 * @param pVCpu The cross context virtual CPU structure of the
1992 * calling thread.
1993 * @param   cbMin               The minimum number of bytes relative to offOpcode
1994 * that must be read.
1995 */
1996IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1997{
1998 /*
1999 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2000 *
2001 * First translate CS:rIP to a physical address.
2002 */
2003 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2004 uint32_t cbToTryRead;
2005 RTGCPTR GCPtrNext;
2006 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2007 {
2008 cbToTryRead = PAGE_SIZE;
2009 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2010 if (!IEM_IS_CANONICAL(GCPtrNext))
2011 return iemRaiseGeneralProtectionFault0(pVCpu);
2012 }
2013 else
2014 {
2015 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2016 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2017 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2018 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2019 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2020 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2021 if (!cbToTryRead) /* overflowed */
2022 {
2023 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2024 cbToTryRead = UINT32_MAX;
2025 /** @todo check out wrapping around the code segment. */
2026 }
2027 if (cbToTryRead < cbMin - cbLeft)
2028 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2029 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2030 }
2031
2032 /* Only read up to the end of the page, and make sure we don't read more
2033 than the opcode buffer can hold. */
2034 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2035 if (cbToTryRead > cbLeftOnPage)
2036 cbToTryRead = cbLeftOnPage;
2037 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2038 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2039/** @todo r=bird: Convert assertion into undefined opcode exception? */
2040 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2041
2042# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2043 /* Allow interpretation of patch manager code blocks since they can for
2044 instance throw #PFs for perfectly good reasons. */
2045 if (pVCpu->iem.s.fInPatchCode)
2046 {
2047 size_t cbRead = 0;
2048 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2049 AssertRCReturn(rc, rc);
2050 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2051 return VINF_SUCCESS;
2052 }
2053# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2054
2055 RTGCPHYS GCPhys;
2056 uint64_t fFlags;
2057 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2058 if (RT_FAILURE(rc))
2059 {
2060 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2061 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2062 }
2063 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2066 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2067 }
2068 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2069 {
2070 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2071 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2072 }
2073 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2074 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2075 /** @todo Check reserved bits and such stuff. PGM is better at doing
2076 * that, so do it when implementing the guest virtual address
2077 * TLB... */
2078
2079 /*
2080 * Read the bytes at this address.
2081 *
2082 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2083 * and since PATM should only patch the start of an instruction there
2084 * should be no need to check again here.
2085 */
2086 if (!pVCpu->iem.s.fBypassHandlers)
2087 {
2088 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2089 cbToTryRead, PGMACCESSORIGIN_IEM);
2090 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2091 { /* likely */ }
2092 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2093 {
2094 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2095                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2096 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2097 }
2098 else
2099 {
2100 Log((RT_SUCCESS(rcStrict)
2101 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2102 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2103                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2104 return rcStrict;
2105 }
2106 }
2107 else
2108 {
2109 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2110 if (RT_SUCCESS(rc))
2111 { /* likely */ }
2112 else
2113 {
2114 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2115 return rc;
2116 }
2117 }
2118 pVCpu->iem.s.cbOpcode += cbToTryRead;
2119 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2120
2121 return VINF_SUCCESS;
2122}
2123
2124#endif /* !IEM_WITH_CODE_TLB */
2125#ifndef IEM_WITH_SETJMP
2126
2127/**
2128 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2129 *
2130 * @returns Strict VBox status code.
2131 * @param pVCpu The cross context virtual CPU structure of the
2132 * calling thread.
2133 * @param pb Where to return the opcode byte.
2134 */
2135DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2136{
2137 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2138 if (rcStrict == VINF_SUCCESS)
2139 {
2140 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2141 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2142 pVCpu->iem.s.offOpcode = offOpcode + 1;
2143 }
2144 else
2145 *pb = 0;
2146 return rcStrict;
2147}
2148
2149
2150/**
2151 * Fetches the next opcode byte.
2152 *
2153 * @returns Strict VBox status code.
2154 * @param pVCpu The cross context virtual CPU structure of the
2155 * calling thread.
2156 * @param pu8 Where to return the opcode byte.
2157 */
2158DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2159{
2160 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2161 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2162 {
2163 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2164 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2165 return VINF_SUCCESS;
2166 }
2167 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2168}
2169
2170#else /* IEM_WITH_SETJMP */
2171
2172/**
2173 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2174 *
2175 * @returns The opcode byte.
2176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2177 */
2178DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2179{
2180# ifdef IEM_WITH_CODE_TLB
2181 uint8_t u8;
2182 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2183 return u8;
2184# else
2185 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2186 if (rcStrict == VINF_SUCCESS)
2187 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2188 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2189# endif
2190}
2191
2192
2193/**
2194 * Fetches the next opcode byte, longjmp on error.
2195 *
2196 * @returns The opcode byte.
2197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2198 */
2199DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2200{
2201# ifdef IEM_WITH_CODE_TLB
2202 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2203 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2204 if (RT_LIKELY( pbBuf != NULL
2205 && offBuf < pVCpu->iem.s.cbInstrBuf))
2206 {
2207 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2208 return pbBuf[offBuf];
2209 }
2210# else
2211 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2212 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2213 {
2214 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2215 return pVCpu->iem.s.abOpcode[offOpcode];
2216 }
2217# endif
2218 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2219}
2220
2221#endif /* IEM_WITH_SETJMP */
2222
2223/**
2224 * Fetches the next opcode byte, returns automatically on failure.
2225 *
2226 * @param a_pu8 Where to return the opcode byte.
2227 * @remark Implicitly references pVCpu.
2228 */
2229#ifndef IEM_WITH_SETJMP
2230# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2231 do \
2232 { \
2233 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2234 if (rcStrict2 == VINF_SUCCESS) \
2235 { /* likely */ } \
2236 else \
2237 return rcStrict2; \
2238 } while (0)
2239#else
2240# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2241#endif /* IEM_WITH_SETJMP */
2242
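/*
 * Illustrative sketch (kept out of the build): how IEM_OPCODE_GET_NEXT_U8 is meant to be
 * used from a decoder function returning a strict status code. The helper name is made
 * up; the point is that the macro either fills the variable or leaves the caller via
 * return (non-setjmp) / longjmp (setjmp) on a fetch failure.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchImm8(PVMCPU pVCpu, uint8_t *pbImm)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);  /* bails out of this function on failure */
    *pbImm = bImm;
    return VINF_SUCCESS;
}
#endif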
2243
2244#ifndef IEM_WITH_SETJMP
2245/**
2246 * Fetches the next signed byte from the opcode stream.
2247 *
2248 * @returns Strict VBox status code.
2249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2250 * @param pi8 Where to return the signed byte.
2251 */
2252DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2253{
2254 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2255}
2256#endif /* !IEM_WITH_SETJMP */
2257
2258
2259/**
2260 * Fetches the next signed byte from the opcode stream, returning automatically
2261 * on failure.
2262 *
2263 * @param a_pi8 Where to return the signed byte.
2264 * @remark Implicitly references pVCpu.
2265 */
2266#ifndef IEM_WITH_SETJMP
2267# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2268 do \
2269 { \
2270 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2271 if (rcStrict2 != VINF_SUCCESS) \
2272 return rcStrict2; \
2273 } while (0)
2274#else /* IEM_WITH_SETJMP */
2275# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2276
2277#endif /* IEM_WITH_SETJMP */
2278
2279#ifndef IEM_WITH_SETJMP
2280
2281/**
2282 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2283 *
2284 * @returns Strict VBox status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param   pu16                Where to return the opcode word.
2287 */
2288DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2289{
2290 uint8_t u8;
2291 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2292 if (rcStrict == VINF_SUCCESS)
2293 *pu16 = (int8_t)u8;
2294 return rcStrict;
2295}
2296
2297
2298/**
2299 * Fetches the next signed byte from the opcode stream, extending it to
2300 * unsigned 16-bit.
2301 *
2302 * @returns Strict VBox status code.
2303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2304 * @param pu16 Where to return the unsigned word.
2305 */
2306DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2307{
2308 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2309 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2310 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2311
2312 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2313 pVCpu->iem.s.offOpcode = offOpcode + 1;
2314 return VINF_SUCCESS;
2315}
2316
2317#endif /* !IEM_WITH_SETJMP */
2318
2319/**
2320 * Fetches the next signed byte from the opcode stream, sign-extends it to
2321 * a word, returning automatically on failure.
2322 *
2323 * @param a_pu16 Where to return the word.
2324 * @remark Implicitly references pVCpu.
2325 */
2326#ifndef IEM_WITH_SETJMP
2327# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2328 do \
2329 { \
2330 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2331 if (rcStrict2 != VINF_SUCCESS) \
2332 return rcStrict2; \
2333 } while (0)
2334#else
2335# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2336#endif
2337
2338#ifndef IEM_WITH_SETJMP
2339
2340/**
2341 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2342 *
2343 * @returns Strict VBox status code.
2344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2345 * @param pu32 Where to return the opcode dword.
2346 */
2347DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2348{
2349 uint8_t u8;
2350 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2351 if (rcStrict == VINF_SUCCESS)
2352 *pu32 = (int8_t)u8;
2353 return rcStrict;
2354}
2355
2356
2357/**
2358 * Fetches the next signed byte from the opcode stream, extending it to
2359 * unsigned 32-bit.
2360 *
2361 * @returns Strict VBox status code.
2362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2363 * @param pu32 Where to return the unsigned dword.
2364 */
2365DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2366{
2367 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2368 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2369 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2370
2371 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2372 pVCpu->iem.s.offOpcode = offOpcode + 1;
2373 return VINF_SUCCESS;
2374}
2375
2376#endif /* !IEM_WITH_SETJMP */
2377
2378/**
2379 * Fetches the next signed byte from the opcode stream, sign-extends it to
2380 * a double word, returning automatically on failure.
2381 *
2382 * @param   a_pu32              Where to return the double word.
2383 * @remark Implicitly references pVCpu.
2384 */
2385#ifndef IEM_WITH_SETJMP
2386# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2387 do \
2388 { \
2389 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2390 if (rcStrict2 != VINF_SUCCESS) \
2391 return rcStrict2; \
2392 } while (0)
2393#else
2394# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2395#endif
2396
2397#ifndef IEM_WITH_SETJMP
2398
2399/**
2400 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2401 *
2402 * @returns Strict VBox status code.
2403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2404 * @param pu64 Where to return the opcode qword.
2405 */
2406DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2407{
2408 uint8_t u8;
2409 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2410 if (rcStrict == VINF_SUCCESS)
2411 *pu64 = (int8_t)u8;
2412 return rcStrict;
2413}
2414
2415
2416/**
2417 * Fetches the next signed byte from the opcode stream, extending it to
2418 * unsigned 64-bit.
2419 *
2420 * @returns Strict VBox status code.
2421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2422 * @param pu64 Where to return the unsigned qword.
2423 */
2424DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2425{
2426 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2427 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2428 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2429
2430 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2431 pVCpu->iem.s.offOpcode = offOpcode + 1;
2432 return VINF_SUCCESS;
2433}
2434
2435#endif /* !IEM_WITH_SETJMP */
2436
2437
2438/**
2439 * Fetches the next signed byte from the opcode stream, sign-extends it to
2440 * a quad word, returning automatically on failure.
2441 *
2442 * @param   a_pu64              Where to return the quad word.
2443 * @remark Implicitly references pVCpu.
2444 */
2445#ifndef IEM_WITH_SETJMP
2446# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2447 do \
2448 { \
2449 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2450 if (rcStrict2 != VINF_SUCCESS) \
2451 return rcStrict2; \
2452 } while (0)
2453#else
2454# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2455#endif
2456
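/*
 * Worked example for the S8 sign-extension family above: the opcode byte 0x80 comes back
 * as 0xFF80 from IEM_OPCODE_GET_NEXT_S8_SX_U16, as 0xFFFFFF80 from ..._S8_SX_U32 and as
 * 0xFFFFFFFFFFFFFF80 from ..._S8_SX_U64, while plain IEM_OPCODE_GET_NEXT_U8 returns just
 * 0x80. This is what short-form relative jumps and sign-extended imm8 operands rely on.
 */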
2457
2458#ifndef IEM_WITH_SETJMP
2459/**
2460 * Fetches the next opcode byte, noting down its position as the ModR/M byte.
2461 *
2462 * @returns Strict VBox status code.
2463 * @param pVCpu The cross context virtual CPU structure of the
2464 * calling thread.
2465 * @param pu8 Where to return the opcode byte.
2466 */
2467DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2468{
2469 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2470 pVCpu->iem.s.offModRm = offOpcode;
2471 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2472 {
2473 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2474 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2475 return VINF_SUCCESS;
2476 }
2477 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2478}
2479#else /* IEM_WITH_SETJMP */
2480/**
2481 * Fetches the next opcode byte, noting down its position as the ModR/M byte, longjmp on error.
2482 *
2483 * @returns The opcode byte.
2484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2485 */
2486DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2487{
2488# ifdef IEM_WITH_CODE_TLB
2489 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2490 pVCpu->iem.s.offModRm = offBuf;
2491 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2492 if (RT_LIKELY( pbBuf != NULL
2493 && offBuf < pVCpu->iem.s.cbInstrBuf))
2494 {
2495 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2496 return pbBuf[offBuf];
2497 }
2498# else
2499 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2500 pVCpu->iem.s.offModRm = offOpcode;
2501 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2502 {
2503 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2504 return pVCpu->iem.s.abOpcode[offOpcode];
2505 }
2506# endif
2507 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2508}
2509#endif /* IEM_WITH_SETJMP */
2510
2511/**
2512 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2513 * on failure.
2514 *
2515 * Will note down the position of the ModR/M byte for VT-x exits.
2516 *
2517 * @param a_pbRm Where to return the RM opcode byte.
2518 * @remark Implicitly references pVCpu.
2519 */
2520#ifndef IEM_WITH_SETJMP
2521# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2522 do \
2523 { \
2524 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2525 if (rcStrict2 == VINF_SUCCESS) \
2526 { /* likely */ } \
2527 else \
2528 return rcStrict2; \
2529 } while (0)
2530#else
2531# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2532#endif /* IEM_WITH_SETJMP */
2533
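/*
 * Illustrative sketch (kept out of the build): decoding a ModR/M byte with the macro
 * above. The helper is made up; the relevant bit is that the fetch also records
 * pVCpu->iem.s.offModRm so VT-x instruction information exits can locate the byte later.
 */
#if 0
static VBOXSTRICTRC iemExampleDecodeModRm(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_RM(&bRm);           /* fetches the byte and notes its offset */
    uint8_t const iReg = (bRm >> 3) & 7;    /* reg field */
    uint8_t const iRm  =  bRm       & 7;    /* r/m field */
    RT_NOREF(iReg, iRm);
    return VINF_SUCCESS;
}
#endif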
2534
2535#ifndef IEM_WITH_SETJMP
2536
2537/**
2538 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2539 *
2540 * @returns Strict VBox status code.
2541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2542 * @param pu16 Where to return the opcode word.
2543 */
2544DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2545{
2546 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2547 if (rcStrict == VINF_SUCCESS)
2548 {
2549 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2550# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2551 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2552# else
2553 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2554# endif
2555 pVCpu->iem.s.offOpcode = offOpcode + 2;
2556 }
2557 else
2558 *pu16 = 0;
2559 return rcStrict;
2560}
2561
2562
2563/**
2564 * Fetches the next opcode word.
2565 *
2566 * @returns Strict VBox status code.
2567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2568 * @param pu16 Where to return the opcode word.
2569 */
2570DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2571{
2572 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2573 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2574 {
2575 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2576# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2577 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2578# else
2579 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2580# endif
2581 return VINF_SUCCESS;
2582 }
2583 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2584}
2585
2586#else /* IEM_WITH_SETJMP */
2587
2588/**
2589 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2590 *
2591 * @returns The opcode word.
2592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2593 */
2594DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2595{
2596# ifdef IEM_WITH_CODE_TLB
2597 uint16_t u16;
2598 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2599 return u16;
2600# else
2601 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2602 if (rcStrict == VINF_SUCCESS)
2603 {
2604 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2605 pVCpu->iem.s.offOpcode += 2;
2606# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2607 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2608# else
2609 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2610# endif
2611 }
2612 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2613# endif
2614}
2615
2616
2617/**
2618 * Fetches the next opcode word, longjmp on error.
2619 *
2620 * @returns The opcode word.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 */
2623DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2624{
2625# ifdef IEM_WITH_CODE_TLB
2626 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2627 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2628 if (RT_LIKELY( pbBuf != NULL
2629 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2630 {
2631 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2632# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2633 return *(uint16_t const *)&pbBuf[offBuf];
2634# else
2635 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2636# endif
2637 }
2638# else
2639 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2640 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2641 {
2642 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2644 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2645# else
2646 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2647# endif
2648 }
2649# endif
2650 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2651}
2652
2653#endif /* IEM_WITH_SETJMP */
2654
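/*
 * Note on the two code paths in the word fetchers above: with IEM_USE_UNALIGNED_DATA_ACCESS
 * the value is read straight out of the opcode buffer, otherwise RT_MAKE_U16 assembles it
 * with the first byte as the low half. Both yield the same x86 little-endian immediate
 * (the direct read assumes a little-endian host); e.g. the opcode bytes 0x34 0x12 become
 * the 16-bit value 0x1234 either way. The byte-wise path merely avoids unaligned host loads.
 */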
2655
2656/**
2657 * Fetches the next opcode word, returns automatically on failure.
2658 *
2659 * @param a_pu16 Where to return the opcode word.
2660 * @remark Implicitly references pVCpu.
2661 */
2662#ifndef IEM_WITH_SETJMP
2663# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2664 do \
2665 { \
2666 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2667 if (rcStrict2 != VINF_SUCCESS) \
2668 return rcStrict2; \
2669 } while (0)
2670#else
2671# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2672#endif
2673
2674#ifndef IEM_WITH_SETJMP
2675
2676/**
2677 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2678 *
2679 * @returns Strict VBox status code.
2680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2681 * @param pu32 Where to return the opcode double word.
2682 */
2683DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2684{
2685 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2686 if (rcStrict == VINF_SUCCESS)
2687 {
2688 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2689 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2690 pVCpu->iem.s.offOpcode = offOpcode + 2;
2691 }
2692 else
2693 *pu32 = 0;
2694 return rcStrict;
2695}
2696
2697
2698/**
2699 * Fetches the next opcode word, zero extending it to a double word.
2700 *
2701 * @returns Strict VBox status code.
2702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2703 * @param pu32 Where to return the opcode double word.
2704 */
2705DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2706{
2707 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2708 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2709 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2710
2711 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2712 pVCpu->iem.s.offOpcode = offOpcode + 2;
2713 return VINF_SUCCESS;
2714}
2715
2716#endif /* !IEM_WITH_SETJMP */
2717
2718
2719/**
2720 * Fetches the next opcode word and zero extends it to a double word, returns
2721 * automatically on failure.
2722 *
2723 * @param a_pu32 Where to return the opcode double word.
2724 * @remark Implicitly references pVCpu.
2725 */
2726#ifndef IEM_WITH_SETJMP
2727# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2728 do \
2729 { \
2730 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2731 if (rcStrict2 != VINF_SUCCESS) \
2732 return rcStrict2; \
2733 } while (0)
2734#else
2735# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2736#endif
2737
2738#ifndef IEM_WITH_SETJMP
2739
2740/**
2741 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2742 *
2743 * @returns Strict VBox status code.
2744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2745 * @param pu64 Where to return the opcode quad word.
2746 */
2747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2748{
2749 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2750 if (rcStrict == VINF_SUCCESS)
2751 {
2752 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2753 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2754 pVCpu->iem.s.offOpcode = offOpcode + 2;
2755 }
2756 else
2757 *pu64 = 0;
2758 return rcStrict;
2759}
2760
2761
2762/**
2763 * Fetches the next opcode word, zero extending it to a quad word.
2764 *
2765 * @returns Strict VBox status code.
2766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2767 * @param pu64 Where to return the opcode quad word.
2768 */
2769DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2770{
2771 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2772 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2773 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2774
2775 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2776 pVCpu->iem.s.offOpcode = offOpcode + 2;
2777 return VINF_SUCCESS;
2778}
2779
2780#endif /* !IEM_WITH_SETJMP */
2781
2782/**
2783 * Fetches the next opcode word and zero extends it to a quad word, returns
2784 * automatically on failure.
2785 *
2786 * @param a_pu64 Where to return the opcode quad word.
2787 * @remark Implicitly references pVCpu.
2788 */
2789#ifndef IEM_WITH_SETJMP
2790# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2791 do \
2792 { \
2793 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2794 if (rcStrict2 != VINF_SUCCESS) \
2795 return rcStrict2; \
2796 } while (0)
2797#else
2798# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2799#endif
2800
2801
2802#ifndef IEM_WITH_SETJMP
2803/**
2804 * Fetches the next signed word from the opcode stream.
2805 *
2806 * @returns Strict VBox status code.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 * @param pi16 Where to return the signed word.
2809 */
2810DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2811{
2812 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2813}
2814#endif /* !IEM_WITH_SETJMP */
2815
2816
2817/**
2818 * Fetches the next signed word from the opcode stream, returning automatically
2819 * on failure.
2820 *
2821 * @param a_pi16 Where to return the signed word.
2822 * @remark Implicitly references pVCpu.
2823 */
2824#ifndef IEM_WITH_SETJMP
2825# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2826 do \
2827 { \
2828 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2829 if (rcStrict2 != VINF_SUCCESS) \
2830 return rcStrict2; \
2831 } while (0)
2832#else
2833# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2834#endif
2835
2836#ifndef IEM_WITH_SETJMP
2837
2838/**
2839 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2840 *
2841 * @returns Strict VBox status code.
2842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2843 * @param pu32 Where to return the opcode dword.
2844 */
2845DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2846{
2847 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2848 if (rcStrict == VINF_SUCCESS)
2849 {
2850 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2851# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2852 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2853# else
2854 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2855 pVCpu->iem.s.abOpcode[offOpcode + 1],
2856 pVCpu->iem.s.abOpcode[offOpcode + 2],
2857 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2858# endif
2859 pVCpu->iem.s.offOpcode = offOpcode + 4;
2860 }
2861 else
2862 *pu32 = 0;
2863 return rcStrict;
2864}
2865
2866
2867/**
2868 * Fetches the next opcode dword.
2869 *
2870 * @returns Strict VBox status code.
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 * @param pu32 Where to return the opcode double word.
2873 */
2874DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2875{
2876 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2877 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2878 {
2879 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2880# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2881 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2882# else
2883 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887# endif
2888 return VINF_SUCCESS;
2889 }
2890 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2891}
2892
2893#else  /* IEM_WITH_SETJMP */
2894
2895/**
2896 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2897 *
2898 * @returns The opcode dword.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 */
2901DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2902{
2903# ifdef IEM_WITH_CODE_TLB
2904 uint32_t u32;
2905 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2906 return u32;
2907# else
2908 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2909 if (rcStrict == VINF_SUCCESS)
2910 {
2911 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2912 pVCpu->iem.s.offOpcode = offOpcode + 4;
2913# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2914 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2915# else
2916 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2917 pVCpu->iem.s.abOpcode[offOpcode + 1],
2918 pVCpu->iem.s.abOpcode[offOpcode + 2],
2919 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2920# endif
2921 }
2922 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2923# endif
2924}
2925
2926
2927/**
2928 * Fetches the next opcode dword, longjmp on error.
2929 *
2930 * @returns The opcode dword.
2931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2932 */
2933DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2934{
2935# ifdef IEM_WITH_CODE_TLB
2936 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2937 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2938 if (RT_LIKELY( pbBuf != NULL
2939 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2940 {
2941 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2942# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2943 return *(uint32_t const *)&pbBuf[offBuf];
2944# else
2945 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2946 pbBuf[offBuf + 1],
2947 pbBuf[offBuf + 2],
2948 pbBuf[offBuf + 3]);
2949# endif
2950 }
2951# else
2952 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2953 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2954 {
2955 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2956# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2957 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2958# else
2959 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2960 pVCpu->iem.s.abOpcode[offOpcode + 1],
2961 pVCpu->iem.s.abOpcode[offOpcode + 2],
2962 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2963# endif
2964 }
2965# endif
2966 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2967}
2968
2969#endif /* !IEM_WITH_SETJMP */
2970
2971
2972/**
2973 * Fetches the next opcode dword, returns automatically on failure.
2974 *
2975 * @param a_pu32 Where to return the opcode dword.
2976 * @remark Implicitly references pVCpu.
2977 */
2978#ifndef IEM_WITH_SETJMP
2979# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2980 do \
2981 { \
2982 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2983 if (rcStrict2 != VINF_SUCCESS) \
2984 return rcStrict2; \
2985 } while (0)
2986#else
2987# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2988#endif
2989
2990#ifndef IEM_WITH_SETJMP
2991
2992/**
2993 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2994 *
2995 * @returns Strict VBox status code.
2996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2997 * @param   pu64                Where to return the opcode quad word.
2998 */
2999DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3000{
3001 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3002 if (rcStrict == VINF_SUCCESS)
3003 {
3004 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3005 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3006 pVCpu->iem.s.abOpcode[offOpcode + 1],
3007 pVCpu->iem.s.abOpcode[offOpcode + 2],
3008 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3009 pVCpu->iem.s.offOpcode = offOpcode + 4;
3010 }
3011 else
3012 *pu64 = 0;
3013 return rcStrict;
3014}
3015
3016
3017/**
3018 * Fetches the next opcode dword, zero extending it to a quad word.
3019 *
3020 * @returns Strict VBox status code.
3021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3022 * @param pu64 Where to return the opcode quad word.
3023 */
3024DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3025{
3026 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3027 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3028 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3029
3030 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3031 pVCpu->iem.s.abOpcode[offOpcode + 1],
3032 pVCpu->iem.s.abOpcode[offOpcode + 2],
3033 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3034 pVCpu->iem.s.offOpcode = offOpcode + 4;
3035 return VINF_SUCCESS;
3036}
3037
3038#endif /* !IEM_WITH_SETJMP */
3039
3040
3041/**
3042 * Fetches the next opcode dword and zero extends it to a quad word, returns
3043 * automatically on failure.
3044 *
3045 * @param a_pu64 Where to return the opcode quad word.
3046 * @remark Implicitly references pVCpu.
3047 */
3048#ifndef IEM_WITH_SETJMP
3049# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3050 do \
3051 { \
3052 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3053 if (rcStrict2 != VINF_SUCCESS) \
3054 return rcStrict2; \
3055 } while (0)
3056#else
3057# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3058#endif
3059
3060
3061#ifndef IEM_WITH_SETJMP
3062/**
3063 * Fetches the next signed double word from the opcode stream.
3064 *
3065 * @returns Strict VBox status code.
3066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3067 * @param pi32 Where to return the signed double word.
3068 */
3069DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3070{
3071 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3072}
3073#endif
3074
3075/**
3076 * Fetches the next signed double word from the opcode stream, returning
3077 * automatically on failure.
3078 *
3079 * @param a_pi32 Where to return the signed double word.
3080 * @remark Implicitly references pVCpu.
3081 */
3082#ifndef IEM_WITH_SETJMP
3083# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3084 do \
3085 { \
3086 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3087 if (rcStrict2 != VINF_SUCCESS) \
3088 return rcStrict2; \
3089 } while (0)
3090#else
3091# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3092#endif
3093
3094#ifndef IEM_WITH_SETJMP
3095
3096/**
3097 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3098 *
3099 * @returns Strict VBox status code.
3100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3101 * @param pu64 Where to return the opcode qword.
3102 */
3103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3104{
3105 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3106 if (rcStrict == VINF_SUCCESS)
3107 {
3108 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3109 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3110 pVCpu->iem.s.abOpcode[offOpcode + 1],
3111 pVCpu->iem.s.abOpcode[offOpcode + 2],
3112 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3113 pVCpu->iem.s.offOpcode = offOpcode + 4;
3114 }
3115 else
3116 *pu64 = 0;
3117 return rcStrict;
3118}
3119
3120
3121/**
3122 * Fetches the next opcode dword, sign extending it into a quad word.
3123 *
3124 * @returns Strict VBox status code.
3125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3126 * @param pu64 Where to return the opcode quad word.
3127 */
3128DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3129{
3130 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3131 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3132 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3133
3134 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3138 *pu64 = i32;
3139 pVCpu->iem.s.offOpcode = offOpcode + 4;
3140 return VINF_SUCCESS;
3141}
3142
3143#endif /* !IEM_WITH_SETJMP */
3144
3145
3146/**
3147 * Fetches the next opcode double word and sign extends it to a quad word,
3148 * returns automatically on failure.
3149 *
3150 * @param a_pu64 Where to return the opcode quad word.
3151 * @remark Implicitly references pVCpu.
3152 */
3153#ifndef IEM_WITH_SETJMP
3154# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3155 do \
3156 { \
3157 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3158 if (rcStrict2 != VINF_SUCCESS) \
3159 return rcStrict2; \
3160 } while (0)
3161#else
3162# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3163#endif
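
/*
 * Worked example of the difference between the zero and sign extending fetchers
 * (illustrative, not from this file): for the opcode bytes 00 00 00 80 the dword
 * value is 0x80000000, so
 *     IEM_OPCODE_GET_NEXT_U32_ZX_U64 stores 0x0000000080000000, while
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64 stores 0xffffffff80000000,
 * matching e.g. how writes to a 32-bit register zero extend into the full 64-bit
 * register versus how sign extended imm32 forms behave in long mode.
 */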
3164
3165#ifndef IEM_WITH_SETJMP
3166
3167/**
3168 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3169 *
3170 * @returns Strict VBox status code.
3171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3172 * @param pu64 Where to return the opcode qword.
3173 */
3174DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3175{
3176 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3177 if (rcStrict == VINF_SUCCESS)
3178 {
3179 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3180# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3181 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3182# else
3183 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3184 pVCpu->iem.s.abOpcode[offOpcode + 1],
3185 pVCpu->iem.s.abOpcode[offOpcode + 2],
3186 pVCpu->iem.s.abOpcode[offOpcode + 3],
3187 pVCpu->iem.s.abOpcode[offOpcode + 4],
3188 pVCpu->iem.s.abOpcode[offOpcode + 5],
3189 pVCpu->iem.s.abOpcode[offOpcode + 6],
3190 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3191# endif
3192 pVCpu->iem.s.offOpcode = offOpcode + 8;
3193 }
3194 else
3195 *pu64 = 0;
3196 return rcStrict;
3197}
3198
3199
3200/**
3201 * Fetches the next opcode qword.
3202 *
3203 * @returns Strict VBox status code.
3204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3205 * @param pu64 Where to return the opcode qword.
3206 */
3207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3208{
3209 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3210 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3211 {
3212# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3213 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3214# else
3215 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3216 pVCpu->iem.s.abOpcode[offOpcode + 1],
3217 pVCpu->iem.s.abOpcode[offOpcode + 2],
3218 pVCpu->iem.s.abOpcode[offOpcode + 3],
3219 pVCpu->iem.s.abOpcode[offOpcode + 4],
3220 pVCpu->iem.s.abOpcode[offOpcode + 5],
3221 pVCpu->iem.s.abOpcode[offOpcode + 6],
3222 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3223# endif
3224 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3225 return VINF_SUCCESS;
3226 }
3227 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3228}
3229
3230#else /* IEM_WITH_SETJMP */
3231
3232/**
3233 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3234 *
3235 * @returns The opcode qword.
3236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3237 */
3238DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3239{
3240# ifdef IEM_WITH_CODE_TLB
3241 uint64_t u64;
3242 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3243 return u64;
3244# else
3245 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3246 if (rcStrict == VINF_SUCCESS)
3247 {
3248 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3249 pVCpu->iem.s.offOpcode = offOpcode + 8;
3250# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3251 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3252# else
3253 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3254 pVCpu->iem.s.abOpcode[offOpcode + 1],
3255 pVCpu->iem.s.abOpcode[offOpcode + 2],
3256 pVCpu->iem.s.abOpcode[offOpcode + 3],
3257 pVCpu->iem.s.abOpcode[offOpcode + 4],
3258 pVCpu->iem.s.abOpcode[offOpcode + 5],
3259 pVCpu->iem.s.abOpcode[offOpcode + 6],
3260 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3261# endif
3262 }
3263 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3264# endif
3265}
3266
3267
3268/**
3269 * Fetches the next opcode qword, longjmp on error.
3270 *
3271 * @returns The opcode qword.
3272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3273 */
3274DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3275{
3276# ifdef IEM_WITH_CODE_TLB
3277 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3278 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3279 if (RT_LIKELY( pbBuf != NULL
3280 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3281 {
3282 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3283# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3284 return *(uint64_t const *)&pbBuf[offBuf];
3285# else
3286 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3287 pbBuf[offBuf + 1],
3288 pbBuf[offBuf + 2],
3289 pbBuf[offBuf + 3],
3290 pbBuf[offBuf + 4],
3291 pbBuf[offBuf + 5],
3292 pbBuf[offBuf + 6],
3293 pbBuf[offBuf + 7]);
3294# endif
3295 }
3296# else
3297 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3298 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3299 {
3300 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3301# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3302 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3303# else
3304 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3305 pVCpu->iem.s.abOpcode[offOpcode + 1],
3306 pVCpu->iem.s.abOpcode[offOpcode + 2],
3307 pVCpu->iem.s.abOpcode[offOpcode + 3],
3308 pVCpu->iem.s.abOpcode[offOpcode + 4],
3309 pVCpu->iem.s.abOpcode[offOpcode + 5],
3310 pVCpu->iem.s.abOpcode[offOpcode + 6],
3311 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3312# endif
3313 }
3314# endif
3315 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3316}
3317
3318#endif /* IEM_WITH_SETJMP */
3319
3320/**
3321 * Fetches the next opcode quad word, returns automatically on failure.
3322 *
3323 * @param a_pu64 Where to return the opcode quad word.
3324 * @remark Implicitly references pVCpu.
3325 */
3326#ifndef IEM_WITH_SETJMP
3327# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3328 do \
3329 { \
3330 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3331 if (rcStrict2 != VINF_SUCCESS) \
3332 return rcStrict2; \
3333 } while (0)
3334#else
3335# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3336#endif
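
/*
 * Illustrative sketch (not from this file) of how the two build modes differ for
 * the quad word fetcher: with IEM_WITH_SETJMP the macro is a plain assignment and
 * errors unwind via longjmp, otherwise it expands to a return-on-failure block.
 * Either way a decoder for e.g. "mov r64, imm64" just writes:
 *
 *     uint64_t uImm64;
 *     IEM_OPCODE_GET_NEXT_U64(&uImm64);   // fetches 8 opcode bytes, little endian
 *     // ... commit uImm64 to the destination register ...
 */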
3337
3338
3339/** @name Misc Worker Functions.
3340 * @{
3341 */
3342
3343/**
3344 * Gets the exception class for the specified exception vector.
3345 *
3346 * @returns The class of the specified exception.
3347 * @param uVector The exception vector.
3348 */
3349IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3350{
3351 Assert(uVector <= X86_XCPT_LAST);
3352 switch (uVector)
3353 {
3354 case X86_XCPT_DE:
3355 case X86_XCPT_TS:
3356 case X86_XCPT_NP:
3357 case X86_XCPT_SS:
3358 case X86_XCPT_GP:
3359 case X86_XCPT_SX: /* AMD only */
3360 return IEMXCPTCLASS_CONTRIBUTORY;
3361
3362 case X86_XCPT_PF:
3363 case X86_XCPT_VE: /* Intel only */
3364 return IEMXCPTCLASS_PAGE_FAULT;
3365
3366 case X86_XCPT_DF:
3367 return IEMXCPTCLASS_DOUBLE_FAULT;
3368 }
3369 return IEMXCPTCLASS_BENIGN;
3370}
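
/*
 * Illustrative classification examples (not from this file), following the
 * Intel/AMD interrupt class tables the function above encodes:
 *     iemGetXcptClass(X86_XCPT_GP)  -> IEMXCPTCLASS_CONTRIBUTORY
 *     iemGetXcptClass(X86_XCPT_PF)  -> IEMXCPTCLASS_PAGE_FAULT
 *     iemGetXcptClass(X86_XCPT_UD)  -> IEMXCPTCLASS_BENIGN
 *     iemGetXcptClass(X86_XCPT_DF)  -> IEMXCPTCLASS_DOUBLE_FAULT
 */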
3371
3372
3373/**
3374 * Evaluates how to handle an exception caused during delivery of another event
3375 * (exception / interrupt).
3376 *
3377 * @returns How to handle the recursive exception.
3378 * @param pVCpu The cross context virtual CPU structure of the
3379 * calling thread.
3380 * @param fPrevFlags The flags of the previous event.
3381 * @param uPrevVector The vector of the previous event.
3382 * @param fCurFlags The flags of the current exception.
3383 * @param uCurVector The vector of the current exception.
3384 * @param pfXcptRaiseInfo Where to store additional information about the
3385 * exception condition. Optional.
3386 */
3387VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3388 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3389{
3390 /*
3391 * Only CPU exceptions can be raised while delivering other events; software interrupt
3392 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3393 */
3394 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3395 Assert(pVCpu); RT_NOREF(pVCpu);
3396 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3397
3398 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3399 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3400 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3401 {
3402 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3403 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3404 {
3405 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3406 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3407 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3408 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3409 {
3410 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3411 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3412 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3413 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3414 uCurVector, pVCpu->cpum.GstCtx.cr2));
3415 }
3416 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3417 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3418 {
3419 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3420 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3421 }
3422 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3423 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3424 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3425 {
3426 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3427 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3428 }
3429 }
3430 else
3431 {
3432 if (uPrevVector == X86_XCPT_NMI)
3433 {
3434 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3435 if (uCurVector == X86_XCPT_PF)
3436 {
3437 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3438 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3439 }
3440 }
3441 else if ( uPrevVector == X86_XCPT_AC
3442 && uCurVector == X86_XCPT_AC)
3443 {
3444 enmRaise = IEMXCPTRAISE_CPU_HANG;
3445 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3446 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3447 }
3448 }
3449 }
3450 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3451 {
3452 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3453 if (uCurVector == X86_XCPT_PF)
3454 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3455 }
3456 else
3457 {
3458 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3459 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3460 }
3461
3462 if (pfXcptRaiseInfo)
3463 *pfXcptRaiseInfo = fRaiseInfo;
3464 return enmRaise;
3465}
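
/*
 * Worked example (illustrative, not from this file): a #PF raised while delivering
 * another #PF is resolved to a double fault by the function above:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                          IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                          IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                          &fRaiseInfo);
 *     // enmRaise == IEMXCPTRAISE_DOUBLE_FAULT and fRaiseInfo has IEMXCPTRAISEINFO_PF_PF set.
 *
 * A benign first event (e.g. #DB) followed by any exception yields
 * IEMXCPTRAISE_CURRENT_XCPT instead, i.e. the second exception is simply delivered.
 */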
3466
3467
3468/**
3469 * Enters the CPU shutdown state initiated by a triple fault or other
3470 * unrecoverable conditions.
3471 *
3472 * @returns Strict VBox status code.
3473 * @param pVCpu The cross context virtual CPU structure of the
3474 * calling thread.
3475 */
3476IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3477{
3478 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3479 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3480
3481 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3482 {
3483 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3484 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3485 }
3486
3487 RT_NOREF(pVCpu);
3488 return VINF_EM_TRIPLE_FAULT;
3489}
3490
3491
3492/**
3493 * Validates a new SS segment.
3494 *
3495 * @returns VBox strict status code.
3496 * @param pVCpu The cross context virtual CPU structure of the
3497 * calling thread.
3498 * @param NewSS The new SS selector.
3499 * @param uCpl The CPL to load the stack for.
3500 * @param pDesc Where to return the descriptor.
3501 */
3502IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3503{
3504 /* Null selectors are not allowed (we're not called for dispatching
3505 interrupts with SS=0 in long mode). */
3506 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3507 {
3508 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3509 return iemRaiseTaskSwitchFault0(pVCpu);
3510 }
3511
3512 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3513 if ((NewSS & X86_SEL_RPL) != uCpl)
3514 {
3515 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3516 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3517 }
3518
3519 /*
3520 * Read the descriptor.
3521 */
3522 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525
3526 /*
3527 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3528 */
3529 if (!pDesc->Legacy.Gen.u1DescType)
3530 {
3531 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3532 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3533 }
3534
3535 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3536 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3537 {
3538 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3539 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3540 }
3541 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3542 {
3543 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3544 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3545 }
3546
3547 /* Is it there? */
3548 /** @todo testcase: Is this checked before the canonical / limit check below? */
3549 if (!pDesc->Legacy.Gen.u1Present)
3550 {
3551 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3552 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3553 }
3554
3555 return VINF_SUCCESS;
3556}
3557
3558
3559/**
3560 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3561 * not.
3562 *
3563 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3564 */
3565#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3566# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3567#else
3568# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3569#endif
3570
3571/**
3572 * Updates the EFLAGS in the correct manner wrt. PATM.
3573 *
3574 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3575 * @param a_fEfl The new EFLAGS.
3576 */
3577#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3578# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3579#else
3580# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3581#endif
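
/*
 * Illustrative read-modify-write sketch (not from this file). All EFLAGS accesses
 * go through these wrappers so raw-mode (PATM) gets a say when it is compiled in;
 * the real-mode interrupt code further down uses exactly this pattern:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);   // mask what the event clears
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */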
3582
3583
3584/** @} */
3585
3586/** @name Raising Exceptions.
3587 *
3588 * @{
3589 */
3590
3591
3592/**
3593 * Loads the specified stack far pointer from the TSS.
3594 *
3595 * @returns VBox strict status code.
3596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3597 * @param uCpl The CPL to load the stack for.
3598 * @param pSelSS Where to return the new stack segment.
3599 * @param puEsp Where to return the new stack pointer.
3600 */
3601IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3602{
3603 VBOXSTRICTRC rcStrict;
3604 Assert(uCpl < 4);
3605
3606 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3607 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3608 {
3609 /*
3610 * 16-bit TSS (X86TSS16).
3611 */
3612 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3613 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3614 {
3615 uint32_t off = uCpl * 4 + 2;
3616 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3617 {
3618 /** @todo check actual access pattern here. */
3619 uint32_t u32Tmp = 0; /* gcc maybe... */
3620 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3621 if (rcStrict == VINF_SUCCESS)
3622 {
3623 *puEsp = RT_LOWORD(u32Tmp);
3624 *pSelSS = RT_HIWORD(u32Tmp);
3625 return VINF_SUCCESS;
3626 }
3627 }
3628 else
3629 {
3630 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3631 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3632 }
3633 break;
3634 }
3635
3636 /*
3637 * 32-bit TSS (X86TSS32).
3638 */
3639 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3640 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3641 {
3642 uint32_t off = uCpl * 8 + 4;
3643 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3644 {
3645 /** @todo check actual access pattern here. */
3646 uint64_t u64Tmp;
3647 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3648 if (rcStrict == VINF_SUCCESS)
3649 {
3650 *puEsp = u64Tmp & UINT32_MAX;
3651 *pSelSS = (RTSEL)(u64Tmp >> 32);
3652 return VINF_SUCCESS;
3653 }
3654 }
3655 else
3656 {
3657 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3658 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3659 }
3660 break;
3661 }
3662
3663 default:
3664 AssertFailed();
3665 rcStrict = VERR_IEM_IPE_4;
3666 break;
3667 }
3668
3669 *puEsp = 0; /* make gcc happy */
3670 *pSelSS = 0; /* make gcc happy */
3671 return rcStrict;
3672}
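
/*
 * Layout reminder (illustrative, not from this file): the ring stack fields sit at
 * the start of the legacy TSSs, so the function above computes
 *     16-bit TSS:  off = uCpl * 4 + 2   -> sp0 at 0x02, ss0 at 0x04, sp1 at 0x06, ...
 *     32-bit TSS:  off = uCpl * 8 + 4   -> esp0 at 0x04, ss0 at 0x08, esp1 at 0x0c, ...
 * and then splits the fetched word/dword pair into the new SS:(E)SP values.
 */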
3673
3674
3675/**
3676 * Loads the specified stack pointer from the 64-bit TSS.
3677 *
3678 * @returns VBox strict status code.
3679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3680 * @param uCpl The CPL to load the stack for.
3681 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3682 * @param puRsp Where to return the new stack pointer.
3683 */
3684IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3685{
3686 Assert(uCpl < 4);
3687 Assert(uIst < 8);
3688 *puRsp = 0; /* make gcc happy */
3689
3690 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3691 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3692
3693 uint32_t off;
3694 if (uIst)
3695 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3696 else
3697 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3698 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3699 {
3700 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3701 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3702 }
3703
3704 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3705}
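
/*
 * Offset arithmetic example (illustrative, not from this file): in the 64-bit TSS
 * the RSP0..RSP2 fields start at offset 0x04 and IST1..IST7 at offset 0x24, so the
 * function above ends up with e.g.
 *     uIst=0, uCpl=0  ->  off = RT_UOFFSETOF(X86TSS64, rsp0)                        (0x04)
 *     uIst=3          ->  off = 2 * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1) (0x34)
 * before bounds checking against TR.limit and fetching the qword.
 */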
3706
3707
3708/**
3709 * Adjusts the CPU state according to the exception being raised.
3710 *
3711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3712 * @param u8Vector The exception that has been raised.
3713 */
3714DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3715{
3716 switch (u8Vector)
3717 {
3718 case X86_XCPT_DB:
3719 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3720 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3721 break;
3722 /** @todo Read the AMD and Intel exception reference... */
3723 }
3724}
3725
3726
3727/**
3728 * Implements exceptions and interrupts for real mode.
3729 *
3730 * @returns VBox strict status code.
3731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3732 * @param cbInstr The number of bytes to offset rIP by in the return
3733 * address.
3734 * @param u8Vector The interrupt / exception vector number.
3735 * @param fFlags The flags.
3736 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3737 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3738 */
3739IEM_STATIC VBOXSTRICTRC
3740iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3741 uint8_t cbInstr,
3742 uint8_t u8Vector,
3743 uint32_t fFlags,
3744 uint16_t uErr,
3745 uint64_t uCr2)
3746{
3747 NOREF(uErr); NOREF(uCr2);
3748 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3749
3750 /*
3751 * Read the IDT entry.
3752 */
3753 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3754 {
3755 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3756 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3757 }
3758 RTFAR16 Idte;
3759 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3760 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3761 {
3762 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 /*
3767 * Push the stack frame.
3768 */
3769 uint16_t *pu16Frame;
3770 uint64_t uNewRsp;
3771 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3772 if (rcStrict != VINF_SUCCESS)
3773 return rcStrict;
3774
3775 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3776#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3777 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3778 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3779 fEfl |= UINT16_C(0xf000);
3780#endif
3781 pu16Frame[2] = (uint16_t)fEfl;
3782 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3783 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3784 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3785 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3786 return rcStrict;
3787
3788 /*
3789 * Load the vector address into cs:ip and make exception specific state
3790 * adjustments.
3791 */
3792 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3793 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3794 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3795 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3796 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3797 pVCpu->cpum.GstCtx.rip = Idte.off;
3798 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3799 IEMMISC_SET_EFL(pVCpu, fEfl);
3800
3801 /** @todo do we actually do this in real mode? */
3802 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3803 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3804
3805 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3806}
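
/*
 * Worked example (illustrative, not from this file): raising INT 10h in real mode
 * with the function above reads the 4 byte IVT entry at IDTR.base + 0x10 * 4,
 * pushes a 6 byte frame and vectors through it:
 *
 *     push FLAGS                      ; pu16Frame[2]
 *     push CS                         ; pu16Frame[1]
 *     push IP (+ cbInstr for INTn)    ; pu16Frame[0]
 *     CS:IP <- IVT[0x10].sel : IVT[0x10].off,  CS.base = sel << 4
 *
 * with IF, TF and AC cleared in the live EFLAGS afterwards.
 */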
3807
3808
3809/**
3810 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3811 *
3812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3813 * @param pSReg Pointer to the segment register.
3814 */
3815IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3816{
3817 pSReg->Sel = 0;
3818 pSReg->ValidSel = 0;
3819 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3820 {
3821 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3822 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3823 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3824 }
3825 else
3826 {
3827 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3828 /** @todo check this on AMD-V */
3829 pSReg->u64Base = 0;
3830 pSReg->u32Limit = 0;
3831 }
3832}
3833
3834
3835/**
3836 * Loads a segment selector during a task switch in V8086 mode.
3837 *
3838 * @param pSReg Pointer to the segment register.
3839 * @param uSel The selector value to load.
3840 */
3841IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3842{
3843 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3844 pSReg->Sel = uSel;
3845 pSReg->ValidSel = uSel;
3846 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3847 pSReg->u64Base = uSel << 4;
3848 pSReg->u32Limit = 0xffff;
3849 pSReg->Attr.u = 0xf3;
3850}
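
/*
 * Illustrative values (not from this file): loading selector 0x1234 in V8086 mode
 * with the helper above yields base = 0x1234 << 4 = 0x12340, limit = 0xffff and
 * attributes 0xf3 (present, DPL=3, read/write accessed data), i.e. the real-mode
 * style segment V8086 code expects.
 */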
3851
3852
3853/**
3854 * Loads a NULL data selector into a selector register, both the hidden and
3855 * visible parts, in protected mode.
3856 *
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uRpl The RPL.
3860 */
3861IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3862{
3863 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3864 * data selector in protected mode. */
3865 pSReg->Sel = uRpl;
3866 pSReg->ValidSel = uRpl;
3867 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3868 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3869 {
3870 /* VT-x (Intel 3960x) observed doing something like this. */
3871 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3872 pSReg->u32Limit = UINT32_MAX;
3873 pSReg->u64Base = 0;
3874 }
3875 else
3876 {
3877 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3878 pSReg->u32Limit = 0;
3879 pSReg->u64Base = 0;
3880 }
3881}
3882
3883
3884/**
3885 * Loads a segment selector during a task switch in protected mode.
3886 *
3887 * In this task switch scenario, we would throw \#TS exceptions rather than
3888 * \#GPs.
3889 *
3890 * @returns VBox strict status code.
3891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3892 * @param pSReg Pointer to the segment register.
3893 * @param uSel The new selector value.
3894 *
3895 * @remarks This does _not_ handle CS or SS.
3896 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3897 */
3898IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3899{
3900 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3901
3902 /* Null data selector. */
3903 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3904 {
3905 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3906 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3907 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3908 return VINF_SUCCESS;
3909 }
3910
3911 /* Fetch the descriptor. */
3912 IEMSELDESC Desc;
3913 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3914 if (rcStrict != VINF_SUCCESS)
3915 {
3916 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3917 VBOXSTRICTRC_VAL(rcStrict)));
3918 return rcStrict;
3919 }
3920
3921 /* Must be a data segment or readable code segment. */
3922 if ( !Desc.Legacy.Gen.u1DescType
3923 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3924 {
3925 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3926 Desc.Legacy.Gen.u4Type));
3927 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3928 }
3929
3930 /* Check privileges for data segments and non-conforming code segments. */
3931 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3932 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3933 {
3934 /* The RPL and the new CPL must be less than or equal to the DPL. */
3935 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3936 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3937 {
3938 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3939 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3941 }
3942 }
3943
3944 /* Is it there? */
3945 if (!Desc.Legacy.Gen.u1Present)
3946 {
3947 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3948 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3949 }
3950
3951 /* The base and limit. */
3952 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3953 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3954
3955 /*
3956 * Ok, everything checked out fine. Now set the accessed bit before
3957 * committing the result into the registers.
3958 */
3959 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3960 {
3961 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3962 if (rcStrict != VINF_SUCCESS)
3963 return rcStrict;
3964 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3965 }
3966
3967 /* Commit */
3968 pSReg->Sel = uSel;
3969 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3970 pSReg->u32Limit = cbLimit;
3971 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3972 pSReg->ValidSel = uSel;
3973 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3974 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3975 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3976
3977 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3978 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3979 return VINF_SUCCESS;
3980}
3981
3982
3983/**
3984 * Performs a task switch.
3985 *
3986 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3987 * caller is responsible for performing the necessary checks (like DPL, TSS
3988 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3989 * reference for JMP, CALL, IRET.
3990 *
3991 * If the task switch is due to a software interrupt or hardware exception,
3992 * the caller is responsible for validating the TSS selector and descriptor. See
3993 * Intel Instruction reference for INT n.
3994 *
3995 * @returns VBox strict status code.
3996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3997 * @param enmTaskSwitch The cause of the task switch.
3998 * @param uNextEip The EIP effective after the task switch.
3999 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4000 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4001 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4002 * @param SelTSS The TSS selector of the new task.
4003 * @param pNewDescTSS Pointer to the new TSS descriptor.
4004 */
4005IEM_STATIC VBOXSTRICTRC
4006iemTaskSwitch(PVMCPU pVCpu,
4007 IEMTASKSWITCH enmTaskSwitch,
4008 uint32_t uNextEip,
4009 uint32_t fFlags,
4010 uint16_t uErr,
4011 uint64_t uCr2,
4012 RTSEL SelTSS,
4013 PIEMSELDESC pNewDescTSS)
4014{
4015 Assert(!IEM_IS_REAL_MODE(pVCpu));
4016 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4017 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4018
4019 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4020 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4021 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4022 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4023 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4024
4025 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4026 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4027
4028 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4029 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4030
4031 /* Update CR2 in case it's a page-fault. */
4032 /** @todo This should probably be done much earlier in IEM/PGM. See
4033 * @bugref{5653#c49}. */
4034 if (fFlags & IEM_XCPT_FLAGS_CR2)
4035 pVCpu->cpum.GstCtx.cr2 = uCr2;
4036
4037 /*
4038 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4039 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4040 */
4041 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4042 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4043 if (uNewTSSLimit < uNewTSSLimitMin)
4044 {
4045 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4046 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4047 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4048 }
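
 /*
  * Worked numbers (illustrative): the architectural minimum limits checked above are
  * 0x67 for a 32-bit TSS (X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN) and 0x2b for a 16-bit
  * TSS (X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN), i.e. 104 and 44 bytes of TSS respectively;
  * anything smaller faults with #TS before the switch gets going.
  */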
4049
4050 /*
4051 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4052 * The new TSS must have been read and validated (DPL, limits etc.) before a
4053 * task-switch VM-exit commences.
4054 *
4055 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4056 */
4057 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4058 {
4059 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4060 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4061 }
4062
4063 /*
4064 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4065 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4066 */
4067 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4068 {
4069 uint32_t const uExitInfo1 = SelTSS;
4070 uint32_t uExitInfo2 = uErr;
4071 switch (enmTaskSwitch)
4072 {
4073 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4074 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4075 default: break;
4076 }
4077 if (fFlags & IEM_XCPT_FLAGS_ERR)
4078 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4079 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4080 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4081
4082 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4083 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4084 RT_NOREF2(uExitInfo1, uExitInfo2);
4085 }
4086
4087 /*
4088 * Check the current TSS limit. The last write to the current TSS during the
4089 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4090 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4091 *
4092 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4093 * end up with smaller than "legal" TSS limits.
4094 */
4095 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4096 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4097 if (uCurTSSLimit < uCurTSSLimitMin)
4098 {
4099 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4100 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4101 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4102 }
4103
4104 /*
4105 * Verify that the new TSS can be accessed and map it. Map only the required contents
4106 * and not the entire TSS.
4107 */
4108 void *pvNewTSS;
4109 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4110 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4111 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4112 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4113 * not perform correct translation if this happens. See Intel spec. 7.2.1
4114 * "Task-State Segment" */
4115 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4116 if (rcStrict != VINF_SUCCESS)
4117 {
4118 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4119 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4120 return rcStrict;
4121 }
4122
4123 /*
4124 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4125 */
4126 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4127 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4128 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4129 {
4130 PX86DESC pDescCurTSS;
4131 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4132 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4133 if (rcStrict != VINF_SUCCESS)
4134 {
4135 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4136 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4137 return rcStrict;
4138 }
4139
4140 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4141 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4142 if (rcStrict != VINF_SUCCESS)
4143 {
4144 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4145 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4146 return rcStrict;
4147 }
4148
4149 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4150 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4151 {
4152 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4153 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4154 u32EFlags &= ~X86_EFL_NT;
4155 }
4156 }
4157
4158 /*
4159 * Save the CPU state into the current TSS.
4160 */
4161 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4162 if (GCPtrNewTSS == GCPtrCurTSS)
4163 {
4164 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4165 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4166 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4167 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4168 pVCpu->cpum.GstCtx.ldtr.Sel));
4169 }
4170 if (fIsNewTSS386)
4171 {
4172 /*
4173 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4174 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4175 */
4176 void *pvCurTSS32;
4177 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4178 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4179 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4180 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4181 if (rcStrict != VINF_SUCCESS)
4182 {
4183 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4184 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4185 return rcStrict;
4186 }
4187
4188 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4189 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4190 pCurTSS32->eip = uNextEip;
4191 pCurTSS32->eflags = u32EFlags;
4192 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4193 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4194 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4195 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4196 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4197 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4198 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4199 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4200 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4201 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4202 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4203 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4204 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4205 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4206
4207 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4208 if (rcStrict != VINF_SUCCESS)
4209 {
4210 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4211 VBOXSTRICTRC_VAL(rcStrict)));
4212 return rcStrict;
4213 }
4214 }
4215 else
4216 {
4217 /*
4218 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4219 */
4220 void *pvCurTSS16;
4221 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4222 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4223 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4224 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4225 if (rcStrict != VINF_SUCCESS)
4226 {
4227 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4228 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4229 return rcStrict;
4230 }
4231
4232 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4233 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4234 pCurTSS16->ip = uNextEip;
4235 pCurTSS16->flags = u32EFlags;
4236 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4237 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4238 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4239 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4240 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4241 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4242 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4243 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4244 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4245 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4246 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4247 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4248
4249 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4250 if (rcStrict != VINF_SUCCESS)
4251 {
4252 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4253 VBOXSTRICTRC_VAL(rcStrict)));
4254 return rcStrict;
4255 }
4256 }
4257
4258 /*
4259 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4260 */
4261 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4262 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4263 {
4264 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4265 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4266 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4267 }
4268
4269 /*
4270 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4271 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4272 */
4273 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4274 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4275 bool fNewDebugTrap;
4276 if (fIsNewTSS386)
4277 {
4278 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4279 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4280 uNewEip = pNewTSS32->eip;
4281 uNewEflags = pNewTSS32->eflags;
4282 uNewEax = pNewTSS32->eax;
4283 uNewEcx = pNewTSS32->ecx;
4284 uNewEdx = pNewTSS32->edx;
4285 uNewEbx = pNewTSS32->ebx;
4286 uNewEsp = pNewTSS32->esp;
4287 uNewEbp = pNewTSS32->ebp;
4288 uNewEsi = pNewTSS32->esi;
4289 uNewEdi = pNewTSS32->edi;
4290 uNewES = pNewTSS32->es;
4291 uNewCS = pNewTSS32->cs;
4292 uNewSS = pNewTSS32->ss;
4293 uNewDS = pNewTSS32->ds;
4294 uNewFS = pNewTSS32->fs;
4295 uNewGS = pNewTSS32->gs;
4296 uNewLdt = pNewTSS32->selLdt;
4297 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4298 }
4299 else
4300 {
4301 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4302 uNewCr3 = 0;
4303 uNewEip = pNewTSS16->ip;
4304 uNewEflags = pNewTSS16->flags;
4305 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4306 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4307 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4308 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4309 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4310 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4311 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4312 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4313 uNewES = pNewTSS16->es;
4314 uNewCS = pNewTSS16->cs;
4315 uNewSS = pNewTSS16->ss;
4316 uNewDS = pNewTSS16->ds;
4317 uNewFS = 0;
4318 uNewGS = 0;
4319 uNewLdt = pNewTSS16->selLdt;
4320 fNewDebugTrap = false;
4321 }
4322
4323 if (GCPtrNewTSS == GCPtrCurTSS)
4324 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4325 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4326
4327 /*
4328 * We're done accessing the new TSS.
4329 */
4330 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4331 if (rcStrict != VINF_SUCCESS)
4332 {
4333 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4334 return rcStrict;
4335 }
4336
4337 /*
4338 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4339 */
4340 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4341 {
4342 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4343 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4347 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4348 return rcStrict;
4349 }
4350
4351 /* Check that the descriptor indicates the new TSS is available (not busy). */
4352 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4353 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4354 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4355
4356 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4357 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4358 if (rcStrict != VINF_SUCCESS)
4359 {
4360 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4361 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4362 return rcStrict;
4363 }
4364 }
4365
4366 /*
4367 * From this point on, we're technically in the new task. We will defer exceptions
4368 * until the completion of the task switch but before executing any instructions in the new task.
4369 */
4370 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4371 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4372 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4373 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4374 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4375 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4376 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4377
4378 /* Set the busy bit in TR. */
4379 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4380 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4381 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4382 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4383 {
4384 uNewEflags |= X86_EFL_NT;
4385 }
4386
4387 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4388 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4389 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4390
4391 pVCpu->cpum.GstCtx.eip = uNewEip;
4392 pVCpu->cpum.GstCtx.eax = uNewEax;
4393 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4394 pVCpu->cpum.GstCtx.edx = uNewEdx;
4395 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4396 pVCpu->cpum.GstCtx.esp = uNewEsp;
4397 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4398 pVCpu->cpum.GstCtx.esi = uNewEsi;
4399 pVCpu->cpum.GstCtx.edi = uNewEdi;
4400
4401 uNewEflags &= X86_EFL_LIVE_MASK;
4402 uNewEflags |= X86_EFL_RA1_MASK;
4403 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4404
4405 /*
4406 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4407 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4408 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4409 */
4410 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4411 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4412
4413 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4414 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4415
4416 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4417 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4418
4419 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4420 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4421
4422 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4423 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4424
4425 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4426 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4427 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4428
4429 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4430 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4431 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4432 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4433
4434 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4435 {
4436 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4437 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4438 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4439 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4440 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4441 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4442 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4443 }
4444
4445 /*
4446 * Switch CR3 for the new task.
4447 */
4448 if ( fIsNewTSS386
4449 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4450 {
4451 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4452 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4453 AssertRCSuccessReturn(rc, rc);
4454
4455 /* Inform PGM. */
4456 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4457 AssertRCReturn(rc, rc);
4458 /* ignore informational status codes */
4459
4460 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4461 }
4462
4463 /*
4464 * Switch LDTR for the new task.
4465 */
4466 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4467 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4468 else
4469 {
4470 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4471
4472 IEMSELDESC DescNewLdt;
4473 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4474 if (rcStrict != VINF_SUCCESS)
4475 {
4476 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4477 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4478 return rcStrict;
4479 }
4480 if ( !DescNewLdt.Legacy.Gen.u1Present
4481 || DescNewLdt.Legacy.Gen.u1DescType
4482 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4483 {
4484 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4485 uNewLdt, DescNewLdt.Legacy.u));
4486 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4487 }
4488
4489 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4490 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4491 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4492 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4493 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4494 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4495 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4496 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4497 }
4498
4499 IEMSELDESC DescSS;
4500 if (IEM_IS_V86_MODE(pVCpu))
4501 {
4502 pVCpu->iem.s.uCpl = 3;
4503 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4504 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4505 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4506 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4507 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4508 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4509
4510 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4511 DescSS.Legacy.u = 0;
4512 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4513 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4514 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4515 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4516 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4517 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4518 DescSS.Legacy.Gen.u2Dpl = 3;
4519 }
4520 else
4521 {
4522 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4523
4524 /*
4525 * Load the stack segment for the new task.
4526 */
4527 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4528 {
4529 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4530 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4531 }
4532
4533 /* Fetch the descriptor. */
4534 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4535 if (rcStrict != VINF_SUCCESS)
4536 {
4537 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4538 VBOXSTRICTRC_VAL(rcStrict)));
4539 return rcStrict;
4540 }
4541
4542 /* SS must be a data segment and writable. */
4543 if ( !DescSS.Legacy.Gen.u1DescType
4544 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4545 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4546 {
4547 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4548 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4553 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4554 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4555 {
4556 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4557 uNewCpl));
4558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4559 }
4560
4561 /* Is it there? */
4562 if (!DescSS.Legacy.Gen.u1Present)
4563 {
4564 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4565 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4566 }
4567
4568 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4569 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4570
4571 /* Set the accessed bit before committing the result into SS. */
4572 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4573 {
4574 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4575 if (rcStrict != VINF_SUCCESS)
4576 return rcStrict;
4577 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4578 }
4579
4580 /* Commit SS. */
4581 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4582 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4583 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4584 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4585 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4586 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4587 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4588
4589 /* CPL has changed, update IEM before loading rest of segments. */
4590 pVCpu->iem.s.uCpl = uNewCpl;
4591
4592 /*
4593 * Load the data segments for the new task.
4594 */
4595 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4596 if (rcStrict != VINF_SUCCESS)
4597 return rcStrict;
4598 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4599 if (rcStrict != VINF_SUCCESS)
4600 return rcStrict;
4601 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4605 if (rcStrict != VINF_SUCCESS)
4606 return rcStrict;
4607
4608 /*
4609 * Load the code segment for the new task.
4610 */
4611 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4612 {
4613 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4614 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4615 }
4616
4617 /* Fetch the descriptor. */
4618 IEMSELDESC DescCS;
4619 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4620 if (rcStrict != VINF_SUCCESS)
4621 {
4622 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4623 return rcStrict;
4624 }
4625
4626 /* CS must be a code segment. */
4627 if ( !DescCS.Legacy.Gen.u1DescType
4628 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4629 {
4630 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4631 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4632 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4633 }
4634
4635 /* For conforming CS, DPL must be less than or equal to the RPL. */
4636 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4637 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4638 {
4639            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4640 DescCS.Legacy.Gen.u2Dpl));
4641 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4642 }
4643
4644 /* For non-conforming CS, DPL must match RPL. */
4645 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4646 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4647 {
4648            Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4649 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4650 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4651 }
4652
4653 /* Is it there? */
4654 if (!DescCS.Legacy.Gen.u1Present)
4655 {
4656 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4657 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4658 }
4659
4660 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4661 u64Base = X86DESC_BASE(&DescCS.Legacy);
4662
4663 /* Set the accessed bit before committing the result into CS. */
4664 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4665 {
4666 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4667 if (rcStrict != VINF_SUCCESS)
4668 return rcStrict;
4669 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4670 }
4671
4672 /* Commit CS. */
4673 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4674 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4675 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4676 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4677 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4678 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4679 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4680 }
4681
4682 /** @todo Debug trap. */
4683 if (fIsNewTSS386 && fNewDebugTrap)
4684 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4685
4686 /*
4687 * Construct the error code masks based on what caused this task switch.
4688 * See Intel Instruction reference for INT.
4689 */
4690 uint16_t uExt;
4691 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4692 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4693 {
4694 uExt = 1;
4695 }
4696 else
4697 uExt = 0;
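    /* Note: uExt is used as the EXT bit of the error codes raised below; it is set when the
       task switch was triggered by a hardware interrupt or exception rather than a software INT. */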
4698
4699 /*
4700 * Push any error code on to the new stack.
4701 */
4702 if (fFlags & IEM_XCPT_FLAGS_ERR)
4703 {
4704 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4705 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4706 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
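        /* A 32-bit TSS implies a 32-bit (dword) error code push; a 16-bit TSS pushes a word. */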
4707
4708 /* Check that there is sufficient space on the stack. */
4709 /** @todo Factor out segment limit checking for normal/expand down segments
4710 * into a separate function. */
4711 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4712 {
4713 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4714 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4715 {
4716 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4717 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4718 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4719 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4720 }
4721 }
4722 else
4723 {
4724 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4725 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4726 {
4727 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4728 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4729 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4730 }
4731 }
4732
4733
4734 if (fIsNewTSS386)
4735 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4736 else
4737 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4738 if (rcStrict != VINF_SUCCESS)
4739 {
4740 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4741 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4742 return rcStrict;
4743 }
4744 }
4745
4746 /* Check the new EIP against the new CS limit. */
4747 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4748 {
4749        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4750 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4751 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4752 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4753 }
4754
4755 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4756 pVCpu->cpum.GstCtx.ss.Sel));
4757 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4758}
4759
4760
4761/**
4762 * Implements exceptions and interrupts for protected mode.
4763 *
4764 * @returns VBox strict status code.
4765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4766 * @param cbInstr The number of bytes to offset rIP by in the return
4767 * address.
4768 * @param u8Vector The interrupt / exception vector number.
4769 * @param fFlags The flags.
4770 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4771 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4772 */
4773IEM_STATIC VBOXSTRICTRC
4774iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4775 uint8_t cbInstr,
4776 uint8_t u8Vector,
4777 uint32_t fFlags,
4778 uint16_t uErr,
4779 uint64_t uCr2)
4780{
4781 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4782
4783 /*
4784 * Read the IDT entry.
4785 */
4786 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4787 {
4788 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4789 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4790 }
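    /* Note: the #GP/#NP error codes raised for IDT-sourced faults carry the vector number in the
       selector index field with the IDT bit (X86_TRAP_ERR_IDT) set. */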
4791 X86DESC Idte;
4792 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4793 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4794 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4795 {
4796 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4797 return rcStrict;
4798 }
4799 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4800 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4801 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4802
4803 /*
4804 * Check the descriptor type, DPL and such.
4805 * ASSUMES this is done in the same order as described for call-gate calls.
4806 */
4807 if (Idte.Gate.u1DescType)
4808 {
4809 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4810 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4811 }
4812 bool fTaskGate = false;
4813 uint8_t f32BitGate = true;
4814 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4815 switch (Idte.Gate.u4Type)
4816 {
4817 case X86_SEL_TYPE_SYS_UNDEFINED:
4818 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4819 case X86_SEL_TYPE_SYS_LDT:
4820 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4821 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4822 case X86_SEL_TYPE_SYS_UNDEFINED2:
4823 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4824 case X86_SEL_TYPE_SYS_UNDEFINED3:
4825 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4826 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4827 case X86_SEL_TYPE_SYS_UNDEFINED4:
4828 {
4829 /** @todo check what actually happens when the type is wrong...
4830 * esp. call gates. */
4831 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4832 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4833 }
4834
4835 case X86_SEL_TYPE_SYS_286_INT_GATE:
4836 f32BitGate = false;
4837 RT_FALL_THRU();
4838 case X86_SEL_TYPE_SYS_386_INT_GATE:
4839 fEflToClear |= X86_EFL_IF;
4840 break;
4841
4842 case X86_SEL_TYPE_SYS_TASK_GATE:
4843 fTaskGate = true;
4844#ifndef IEM_IMPLEMENTS_TASKSWITCH
4845 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4846#endif
4847 break;
4848
4849 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4850            f32BitGate = false;
            RT_FALL_THRU();
4851 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4852 break;
4853
4854 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4855 }
4856
4857 /* Check DPL against CPL if applicable. */
4858 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4859 {
4860 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4861 {
4862 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4863 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4864 }
4865 }
4866
4867 /* Is it there? */
4868 if (!Idte.Gate.u1Present)
4869 {
4870 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4871 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4872 }
4873
4874 /* Is it a task-gate? */
4875 if (fTaskGate)
4876 {
4877 /*
4878 * Construct the error code masks based on what caused this task switch.
4879 * See Intel Instruction reference for INT.
4880 */
4881 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4882 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4883 RTSEL SelTSS = Idte.Gate.u16Sel;
4884
4885 /*
4886 * Fetch the TSS descriptor in the GDT.
4887 */
4888 IEMSELDESC DescTSS;
4889 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4890 if (rcStrict != VINF_SUCCESS)
4891 {
4892 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4893 VBOXSTRICTRC_VAL(rcStrict)));
4894 return rcStrict;
4895 }
4896
4897 /* The TSS descriptor must be a system segment and be available (not busy). */
4898 if ( DescTSS.Legacy.Gen.u1DescType
4899 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4900 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4901 {
4902 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4903 u8Vector, SelTSS, DescTSS.Legacy.au64));
4904 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4905 }
4906
4907 /* The TSS must be present. */
4908 if (!DescTSS.Legacy.Gen.u1Present)
4909 {
4910 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4911 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4912 }
4913
4914 /* Do the actual task switch. */
4915 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4916 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4917 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4918 }
4919
4920 /* A null CS is bad. */
4921 RTSEL NewCS = Idte.Gate.u16Sel;
4922 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4923 {
4924 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4925 return iemRaiseGeneralProtectionFault0(pVCpu);
4926 }
4927
4928 /* Fetch the descriptor for the new CS. */
4929 IEMSELDESC DescCS;
4930 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4931 if (rcStrict != VINF_SUCCESS)
4932 {
4933 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4934 return rcStrict;
4935 }
4936
4937 /* Must be a code segment. */
4938 if (!DescCS.Legacy.Gen.u1DescType)
4939 {
4940 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4941 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4942 }
4943 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4944 {
4945 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4946 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4947 }
4948
4949 /* Don't allow lowering the privilege level. */
4950 /** @todo Does the lowering of privileges apply to software interrupts
4951 * only? This has bearings on the more-privileged or
4952 * same-privilege stack behavior further down. A testcase would
4953 * be nice. */
4954 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4955 {
4956 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4957 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4958 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4959 }
4960
4961 /* Make sure the selector is present. */
4962 if (!DescCS.Legacy.Gen.u1Present)
4963 {
4964 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4965 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4966 }
4967
4968 /* Check the new EIP against the new CS limit. */
4969 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4970 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4971 ? Idte.Gate.u16OffsetLow
4972 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4973 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4974 if (uNewEip > cbLimitCS)
4975 {
4976 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4977 u8Vector, uNewEip, cbLimitCS, NewCS));
4978 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4979 }
4980 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4981
4982 /* Calc the flag image to push. */
4983 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4984 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4985 fEfl &= ~X86_EFL_RF;
4986 else
4987 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4988
4989 /* From V8086 mode only go to CPL 0. */
4990 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4991 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4992 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4993 {
4994 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4995 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4996 }
4997
4998 /*
4999 * If the privilege level changes, we need to get a new stack from the TSS.
5000     * This in turn means validating the new SS and ESP...
5001 */
5002 if (uNewCpl != pVCpu->iem.s.uCpl)
5003 {
5004 RTSEL NewSS;
5005 uint32_t uNewEsp;
5006 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5007 if (rcStrict != VINF_SUCCESS)
5008 return rcStrict;
5009
5010 IEMSELDESC DescSS;
5011 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5012 if (rcStrict != VINF_SUCCESS)
5013 return rcStrict;
5014 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5015 if (!DescSS.Legacy.Gen.u1DefBig)
5016 {
5017 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5018 uNewEsp = (uint16_t)uNewEsp;
5019 }
5020
5021 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5022
5023 /* Check that there is sufficient space for the stack frame. */
5024 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5025 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5026 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5027 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
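            /* Frame layout (from the new stack top): optional error code, then (E)IP, CS, (E)FLAGS,
               (E)SP and SS, plus ES, DS, FS and GS when interrupting V8086 code; entries are 2 bytes
               for 16-bit gates and 4 bytes for 32-bit ones, hence the f32BitGate shift. */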
5028
5029 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5030 {
5031 if ( uNewEsp - 1 > cbLimitSS
5032 || uNewEsp < cbStackFrame)
5033 {
5034 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5035 u8Vector, NewSS, uNewEsp, cbStackFrame));
5036 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5037 }
5038 }
5039 else
5040 {
5041 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5042 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5043 {
5044 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5045 u8Vector, NewSS, uNewEsp, cbStackFrame));
5046 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5047 }
5048 }
5049
5050 /*
5051 * Start making changes.
5052 */
5053
5054 /* Set the new CPL so that stack accesses use it. */
5055 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5056 pVCpu->iem.s.uCpl = uNewCpl;
5057
5058 /* Create the stack frame. */
5059 RTPTRUNION uStackFrame;
5060 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5061 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064 void * const pvStackFrame = uStackFrame.pv;
5065 if (f32BitGate)
5066 {
5067 if (fFlags & IEM_XCPT_FLAGS_ERR)
5068 *uStackFrame.pu32++ = uErr;
5069 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5070 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5071 uStackFrame.pu32[2] = fEfl;
5072 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5073 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5074 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5075 if (fEfl & X86_EFL_VM)
5076 {
5077 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5078 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5079 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5080 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5081 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5082 }
5083 }
5084 else
5085 {
5086 if (fFlags & IEM_XCPT_FLAGS_ERR)
5087 *uStackFrame.pu16++ = uErr;
5088 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5089 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5090 uStackFrame.pu16[2] = fEfl;
5091 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5092 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5093 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5094 if (fEfl & X86_EFL_VM)
5095 {
5096 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5097 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5098 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5099 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5100 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5101 }
5102 }
5103 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5104 if (rcStrict != VINF_SUCCESS)
5105 return rcStrict;
5106
5107 /* Mark the selectors 'accessed' (hope this is the correct time). */
5108        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5109 * after pushing the stack frame? (Write protect the gdt + stack to
5110 * find out.) */
5111 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5112 {
5113 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5117 }
5118
5119 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5120 {
5121 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5122 if (rcStrict != VINF_SUCCESS)
5123 return rcStrict;
5124 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5125 }
5126
5127 /*
5128         * Start committing the register changes (joins with the DPL=CPL branch).
5129 */
5130 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5131 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5132 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5133 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5134 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5135 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5136 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5137 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5138 * SP is loaded).
5139 * Need to check the other combinations too:
5140 * - 16-bit TSS, 32-bit handler
5141 * - 32-bit TSS, 16-bit handler */
5142 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5143 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5144 else
5145 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5146
5147 if (fEfl & X86_EFL_VM)
5148 {
5149 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5150 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5151 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5152 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5153 }
5154 }
5155 /*
5156 * Same privilege, no stack change and smaller stack frame.
5157 */
5158 else
5159 {
5160 uint64_t uNewRsp;
5161 RTPTRUNION uStackFrame;
5162 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
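            /* Only (E)IP, CS and (E)FLAGS (plus the error code, if any) are pushed when the
               privilege level does not change. */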
5163 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5164 if (rcStrict != VINF_SUCCESS)
5165 return rcStrict;
5166 void * const pvStackFrame = uStackFrame.pv;
5167
5168 if (f32BitGate)
5169 {
5170 if (fFlags & IEM_XCPT_FLAGS_ERR)
5171 *uStackFrame.pu32++ = uErr;
5172 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5173 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5174 uStackFrame.pu32[2] = fEfl;
5175 }
5176 else
5177 {
5178 if (fFlags & IEM_XCPT_FLAGS_ERR)
5179 *uStackFrame.pu16++ = uErr;
5180 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5181 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5182 uStackFrame.pu16[2] = fEfl;
5183 }
5184 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5185 if (rcStrict != VINF_SUCCESS)
5186 return rcStrict;
5187
5188 /* Mark the CS selector as 'accessed'. */
5189 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5190 {
5191 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5192 if (rcStrict != VINF_SUCCESS)
5193 return rcStrict;
5194 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5195 }
5196
5197 /*
5198 * Start committing the register changes (joins with the other branch).
5199 */
5200 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5201 }
5202
5203 /* ... register committing continues. */
5204 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5205 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5206 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5207 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5208 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5209 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5210
5211 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5212 fEfl &= ~fEflToClear;
5213 IEMMISC_SET_EFL(pVCpu, fEfl);
5214
5215 if (fFlags & IEM_XCPT_FLAGS_CR2)
5216 pVCpu->cpum.GstCtx.cr2 = uCr2;
5217
5218 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5219 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5220
5221 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5222}
5223
5224
5225/**
5226 * Implements exceptions and interrupts for long mode.
5227 *
5228 * @returns VBox strict status code.
5229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5230 * @param cbInstr The number of bytes to offset rIP by in the return
5231 * address.
5232 * @param u8Vector The interrupt / exception vector number.
5233 * @param fFlags The flags.
5234 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5235 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5236 */
5237IEM_STATIC VBOXSTRICTRC
5238iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5239 uint8_t cbInstr,
5240 uint8_t u8Vector,
5241 uint32_t fFlags,
5242 uint16_t uErr,
5243 uint64_t uCr2)
5244{
5245 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5246
5247 /*
5248 * Read the IDT entry.
5249 */
5250 uint16_t offIdt = (uint16_t)u8Vector << 4;
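    /* Long-mode IDT entries are 16 bytes, hence the shift by 4 and the two 8-byte fetches below. */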
5251 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5252 {
5253 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5254 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5255 }
5256 X86DESC64 Idte;
5257 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5258 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5259 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5260 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5263 return rcStrict;
5264 }
5265 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5266 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5267 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5268
5269 /*
5270 * Check the descriptor type, DPL and such.
5271 * ASSUMES this is done in the same order as described for call-gate calls.
5272 */
5273 if (Idte.Gate.u1DescType)
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5276 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5277 }
5278 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5279 switch (Idte.Gate.u4Type)
5280 {
5281 case AMD64_SEL_TYPE_SYS_INT_GATE:
5282 fEflToClear |= X86_EFL_IF;
5283 break;
5284 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5285 break;
5286
5287 default:
5288 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5289 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5290 }
5291
5292 /* Check DPL against CPL if applicable. */
5293 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5294 {
5295 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5296 {
5297 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5298 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5299 }
5300 }
5301
5302 /* Is it there? */
5303 if (!Idte.Gate.u1Present)
5304 {
5305 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5306 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5307 }
5308
5309 /* A null CS is bad. */
5310 RTSEL NewCS = Idte.Gate.u16Sel;
5311 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5312 {
5313 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5314 return iemRaiseGeneralProtectionFault0(pVCpu);
5315 }
5316
5317 /* Fetch the descriptor for the new CS. */
5318 IEMSELDESC DescCS;
5319 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5320 if (rcStrict != VINF_SUCCESS)
5321 {
5322 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5323 return rcStrict;
5324 }
5325
5326 /* Must be a 64-bit code segment. */
5327 if (!DescCS.Long.Gen.u1DescType)
5328 {
5329 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5330 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5331 }
5332 if ( !DescCS.Long.Gen.u1Long
5333 || DescCS.Long.Gen.u1DefBig
5334 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5335 {
5336 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5337 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5338 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5339 }
5340
5341 /* Don't allow lowering the privilege level. For non-conforming CS
5342 selectors, the CS.DPL sets the privilege level the trap/interrupt
5343 handler runs at. For conforming CS selectors, the CPL remains
5344 unchanged, but the CS.DPL must be <= CPL. */
5345 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5346 * when CPU in Ring-0. Result \#GP? */
5347 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5348 {
5349 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5350 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5351 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5352 }
5353
5354
5355 /* Make sure the selector is present. */
5356 if (!DescCS.Legacy.Gen.u1Present)
5357 {
5358 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5359 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5360 }
5361
5362 /* Check that the new RIP is canonical. */
5363 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5364 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5365 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5366 if (!IEM_IS_CANONICAL(uNewRip))
5367 {
5368 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5369 return iemRaiseGeneralProtectionFault0(pVCpu);
5370 }
5371
5372 /*
5373 * If the privilege level changes or if the IST isn't zero, we need to get
5374 * a new stack from the TSS.
5375 */
5376 uint64_t uNewRsp;
5377 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5378 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5379 if ( uNewCpl != pVCpu->iem.s.uCpl
5380 || Idte.Gate.u3IST != 0)
5381 {
5382 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5383 if (rcStrict != VINF_SUCCESS)
5384 return rcStrict;
5385 }
5386 else
5387 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5388 uNewRsp &= ~(uint64_t)0xf;
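    /* The CPU aligns the new stack pointer on a 16-byte boundary before pushing the 64-bit
       interrupt frame. */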
5389
5390 /*
5391 * Calc the flag image to push.
5392 */
5393 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5394 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5395 fEfl &= ~X86_EFL_RF;
5396 else
5397 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5398
5399 /*
5400 * Start making changes.
5401 */
5402 /* Set the new CPL so that stack accesses use it. */
5403 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5404 pVCpu->iem.s.uCpl = uNewCpl;
5405
5406 /* Create the stack frame. */
5407 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
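    /* The 64-bit frame is always RIP, CS, RFLAGS, RSP and SS (5 qwords), preceded by an optional
       error code qword at the new stack top. */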
5408 RTPTRUNION uStackFrame;
5409 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5410 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5411 if (rcStrict != VINF_SUCCESS)
5412 return rcStrict;
5413 void * const pvStackFrame = uStackFrame.pv;
5414
5415 if (fFlags & IEM_XCPT_FLAGS_ERR)
5416 *uStackFrame.pu64++ = uErr;
5417 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5418 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5419 uStackFrame.pu64[2] = fEfl;
5420 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5421 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5422 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5423 if (rcStrict != VINF_SUCCESS)
5424 return rcStrict;
5425
5426    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5427    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5428 * after pushing the stack frame? (Write protect the gdt + stack to
5429 * find out.) */
5430 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5431 {
5432 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5433 if (rcStrict != VINF_SUCCESS)
5434 return rcStrict;
5435 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5436 }
5437
5438 /*
5439     * Start committing the register changes.
5440 */
5441    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5442 * hidden registers when interrupting 32-bit or 16-bit code! */
5443 if (uNewCpl != uOldCpl)
5444 {
5445 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5446 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5447 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5448 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5449 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5450 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5451 }
5452 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5453 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5454 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5455 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5456 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5457 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5458 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5459 pVCpu->cpum.GstCtx.rip = uNewRip;
5460
5461 fEfl &= ~fEflToClear;
5462 IEMMISC_SET_EFL(pVCpu, fEfl);
5463
5464 if (fFlags & IEM_XCPT_FLAGS_CR2)
5465 pVCpu->cpum.GstCtx.cr2 = uCr2;
5466
5467 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5468 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5469
5470 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5471}
5472
5473
5474/**
5475 * Implements exceptions and interrupts.
5476 *
5477 * All exceptions and interrupts go through this function!
5478 *
5479 * @returns VBox strict status code.
5480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5481 * @param cbInstr The number of bytes to offset rIP by in the return
5482 * address.
5483 * @param u8Vector The interrupt / exception vector number.
5484 * @param fFlags The flags.
5485 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5486 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5487 */
5488DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5489iemRaiseXcptOrInt(PVMCPU pVCpu,
5490 uint8_t cbInstr,
5491 uint8_t u8Vector,
5492 uint32_t fFlags,
5493 uint16_t uErr,
5494 uint64_t uCr2)
5495{
5496 /*
5497 * Get all the state that we might need here.
5498 */
5499 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5500 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5501
5502#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5503 /*
5504 * Flush prefetch buffer
5505 */
5506 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5507#endif
5508
5509 /*
5510 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5511 */
5512 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5513 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5514 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5515 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5516 {
5517 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5518 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5519 u8Vector = X86_XCPT_GP;
5520 uErr = 0;
5521 }
5522#ifdef DBGFTRACE_ENABLED
5523 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5524 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5525 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5526#endif
5527
5528#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5529 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5530 {
5531 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5532 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5533 return rcStrict0;
5534 }
5535#endif
5536
5537#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5538 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5539 {
5540 /*
5541 * If the event is being injected as part of VMRUN, it isn't subject to event
5542 * intercepts in the nested-guest. However, secondary exceptions that occur
5543 * during injection of any event -are- subject to exception intercepts.
5544 *
5545 * See AMD spec. 15.20 "Event Injection".
5546 */
5547 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5548 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5549 else
5550 {
5551 /*
5552 * Check and handle if the event being raised is intercepted.
5553 */
5554 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5555 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5556 return rcStrict0;
5557 }
5558 }
5559#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5560
5561 /*
5562 * Do recursion accounting.
5563 */
5564 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5565 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5566 if (pVCpu->iem.s.cXcptRecursions == 0)
5567 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5568 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5569 else
5570 {
5571 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5572 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5573 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5574
5575 if (pVCpu->iem.s.cXcptRecursions >= 4)
5576 {
5577#ifdef DEBUG_bird
5578 AssertFailed();
5579#endif
5580 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5581 }
5582
5583 /*
5584 * Evaluate the sequence of recurring events.
5585 */
5586 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5587 NULL /* pXcptRaiseInfo */);
5588 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5589 { /* likely */ }
5590 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5591 {
5592 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5593 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5594 u8Vector = X86_XCPT_DF;
5595 uErr = 0;
5596 /** @todo NSTVMX: Do we need to do something here for VMX? */
5597 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5598 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5599 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5600 }
5601 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5602 {
5603 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5604 return iemInitiateCpuShutdown(pVCpu);
5605 }
5606 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5607 {
5608 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5609 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5610 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5611 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5612 return VERR_EM_GUEST_CPU_HANG;
5613 }
5614 else
5615 {
5616 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5617 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5618 return VERR_IEM_IPE_9;
5619 }
5620
5621 /*
5622         * The 'EXT' bit is set when an exception occurs during delivery of an external
5623         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5624         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5625         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5626 *
5627 * [1] - Intel spec. 6.13 "Error Code"
5628 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5629 * [3] - Intel Instruction reference for INT n.
5630 */
5631 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5632 && (fFlags & IEM_XCPT_FLAGS_ERR)
5633 && u8Vector != X86_XCPT_PF
5634 && u8Vector != X86_XCPT_DF)
5635 {
5636 uErr |= X86_TRAP_ERR_EXTERNAL;
5637 }
5638 }
5639
5640 pVCpu->iem.s.cXcptRecursions++;
5641 pVCpu->iem.s.uCurXcpt = u8Vector;
5642 pVCpu->iem.s.fCurXcpt = fFlags;
5643 pVCpu->iem.s.uCurXcptErr = uErr;
5644 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5645
5646 /*
5647 * Extensive logging.
5648 */
5649#if defined(LOG_ENABLED) && defined(IN_RING3)
5650 if (LogIs3Enabled())
5651 {
5652 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5653 PVM pVM = pVCpu->CTX_SUFF(pVM);
5654 char szRegs[4096];
5655 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5656 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5657 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5658 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5659 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5660 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5661 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5662 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5663 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5664 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5665 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5666 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5667 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5668 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5669 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5670 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5671 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5672 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5673 " efer=%016VR{efer}\n"
5674 " pat=%016VR{pat}\n"
5675 " sf_mask=%016VR{sf_mask}\n"
5676 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5677 " lstar=%016VR{lstar}\n"
5678 " star=%016VR{star} cstar=%016VR{cstar}\n"
5679 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5680 );
5681
5682 char szInstr[256];
5683 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5684 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5685 szInstr, sizeof(szInstr), NULL);
5686 Log3(("%s%s\n", szRegs, szInstr));
5687 }
5688#endif /* LOG_ENABLED */
5689
5690 /*
5691 * Call the mode specific worker function.
5692 */
5693 VBOXSTRICTRC rcStrict;
5694 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5695 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5696 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5697 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5698 else
5699 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5700
5701 /* Flush the prefetch buffer. */
5702#ifdef IEM_WITH_CODE_TLB
5703 pVCpu->iem.s.pbInstrBuf = NULL;
5704#else
5705 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5706#endif
5707
5708 /*
5709 * Unwind.
5710 */
5711 pVCpu->iem.s.cXcptRecursions--;
5712 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5713 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5714 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5715 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5716 pVCpu->iem.s.cXcptRecursions + 1));
5717 return rcStrict;
5718}
5719
5720#ifdef IEM_WITH_SETJMP
5721/**
5722 * See iemRaiseXcptOrInt. Will not return.
5723 */
5724IEM_STATIC DECL_NO_RETURN(void)
5725iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5726 uint8_t cbInstr,
5727 uint8_t u8Vector,
5728 uint32_t fFlags,
5729 uint16_t uErr,
5730 uint64_t uCr2)
5731{
5732 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5733 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5734}
5735#endif
5736
5737
5738/** \#DE - 00. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5742}
5743
5744
5745/** \#DB - 01.
5746 * @note This automatically clears DR7.GD. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5748{
5749 /** @todo set/clear RF. */
5750 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5751 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5752}
5753
5754
5755/** \#BR - 05. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5757{
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5759}
5760
5761
5762/** \#UD - 06. */
5763DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5764{
5765 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5766}
5767
5768
5769/** \#NM - 07. */
5770DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5771{
5772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5773}
5774
5775
5776/** \#TS(err) - 0a. */
5777DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5778{
5779 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5780}
5781
5782
5783/** \#TS(tr) - 0a. */
5784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5785{
5786 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5787 pVCpu->cpum.GstCtx.tr.Sel, 0);
5788}
5789
5790
5791/** \#TS(0) - 0a. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5793{
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5795 0, 0);
5796}
5797
5798
5799/** \#TS(sel) - 0a. */
5800DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5801{
5802 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5803 uSel & X86_SEL_MASK_OFF_RPL, 0);
5804}
5805
5806
5807/** \#NP(err) - 0b. */
5808DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5809{
5810 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5811}
5812
5813
5814/** \#NP(sel) - 0b. */
5815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5816{
5817 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5818 uSel & ~X86_SEL_RPL, 0);
5819}
5820
5821
5822/** \#SS(seg) - 0c. */
5823DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5824{
5825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5826 uSel & ~X86_SEL_RPL, 0);
5827}
5828
5829
5830/** \#SS(err) - 0c. */
5831DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5832{
5833 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5834}
5835
5836
5837/** \#GP(n) - 0d. */
5838DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5839{
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5841}
5842
5843
5844/** \#GP(0) - 0d. */
5845DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5846{
5847 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5848}
5849
5850#ifdef IEM_WITH_SETJMP
5851/** \#GP(0) - 0d. */
5852DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5853{
5854 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5855}
5856#endif
5857
5858
5859/** \#GP(sel) - 0d. */
5860DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5861{
5862 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5863 Sel & ~X86_SEL_RPL, 0);
5864}
5865
5866
5867/** \#GP(0) - 0d. */
5868DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5869{
5870 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5871}
5872
5873
5874/** \#GP(sel) - 0d. */
5875DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5876{
5877 NOREF(iSegReg); NOREF(fAccess);
5878 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5879 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5880}
5881
5882#ifdef IEM_WITH_SETJMP
5883/** \#GP(sel) - 0d, longjmp. */
5884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5885{
5886 NOREF(iSegReg); NOREF(fAccess);
5887 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5888 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5889}
5890#endif
5891
5892/** \#GP(sel) - 0d. */
5893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5894{
5895 NOREF(Sel);
5896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5897}
5898
5899#ifdef IEM_WITH_SETJMP
5900/** \#GP(sel) - 0d, longjmp. */
5901DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5902{
5903 NOREF(Sel);
5904 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5905}
5906#endif
5907
5908
5909/** \#GP(sel) - 0d. */
5910DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5911{
5912 NOREF(iSegReg); NOREF(fAccess);
5913 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5914}
5915
5916#ifdef IEM_WITH_SETJMP
5917/** \#GP(sel) - 0d, longjmp. */
5918DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5919 uint32_t fAccess)
5920{
5921 NOREF(iSegReg); NOREF(fAccess);
5922 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5923}
5924#endif
5925
5926
5927/** \#PF(n) - 0e. */
5928DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5929{
5930 uint16_t uErr;
5931 switch (rc)
5932 {
5933 case VERR_PAGE_NOT_PRESENT:
5934 case VERR_PAGE_TABLE_NOT_PRESENT:
5935 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5936 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5937 uErr = 0;
5938 break;
5939
5940 default:
5941 AssertMsgFailed(("%Rrc\n", rc));
5942 RT_FALL_THRU();
5943 case VERR_ACCESS_DENIED:
5944 uErr = X86_TRAP_PF_P;
5945 break;
5946
5947 /** @todo reserved */
5948 }
5949
5950 if (pVCpu->iem.s.uCpl == 3)
5951 uErr |= X86_TRAP_PF_US;
5952
5953 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5954 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5955 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5956 uErr |= X86_TRAP_PF_ID;
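    /* The instruction-fetch (ID) bit is only reported when PAE paging with EFER.NXE is enabled,
       as per the checks above. */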
5957
5958#if 0 /* This is so much non-sense, really. Why was it done like that? */
5959 /* Note! RW access callers reporting a WRITE protection fault, will clear
5960 the READ flag before calling. So, read-modify-write accesses (RW)
5961 can safely be reported as READ faults. */
5962 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5963 uErr |= X86_TRAP_PF_RW;
5964#else
5965 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5966 {
5967 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5968 uErr |= X86_TRAP_PF_RW;
5969 }
5970#endif
5971
5972 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5973 uErr, GCPtrWhere);
5974}
5975
5976#ifdef IEM_WITH_SETJMP
5977/** \#PF(n) - 0e, longjmp. */
5978IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5979{
5980 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5981}
5982#endif
5983
5984
5985/** \#MF(0) - 10. */
5986DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5987{
5988 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5989}
5990
5991
5992/** \#AC(0) - 11. */
5993DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5994{
5995 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5996}
5997
5998
5999/**
6000 * Macro for calling iemCImplRaiseDivideError().
6001 *
6002 * This enables us to add/remove arguments and force different levels of
6003 * inlining as we wish.
6004 *
6005 * @return Strict VBox status code.
6006 */
6007#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6008IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6009{
6010 NOREF(cbInstr);
6011 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6012}
6013
6014
6015/**
6016 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6017 *
6018 * This enables us to add/remove arguments and force different levels of
6019 * inlining as we wish.
6020 *
6021 * @return Strict VBox status code.
6022 */
6023#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6024IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6025{
6026 NOREF(cbInstr);
6027 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6028}
6029
6030
6031/**
6032 * Macro for calling iemCImplRaiseInvalidOpcode().
6033 *
6034 * This enables us to add/remove arguments and force different levels of
6035 * inlining as we wish.
6036 *
6037 * @return Strict VBox status code.
6038 */
6039#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6040IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6041{
6042 NOREF(cbInstr);
6043 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6044}
6045
6046
6047/** @} */
6048
6049
6050/*
6051 *
6052 * Helper routines.
6053 * Helper routines.
6054 * Helper routines.
6055 *
6056 */
6057
6058/**
6059 * Recalculates the effective operand size.
6060 *
6061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6062 */
6063IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6064{
6065 switch (pVCpu->iem.s.enmCpuMode)
6066 {
6067 case IEMMODE_16BIT:
6068 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6069 break;
6070 case IEMMODE_32BIT:
6071 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6072 break;
6073 case IEMMODE_64BIT:
6074 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6075 {
6076 case 0:
6077 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6078 break;
6079 case IEM_OP_PRF_SIZE_OP:
6080 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6081 break;
6082 case IEM_OP_PRF_SIZE_REX_W:
6083 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6084 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6085 break;
6086 }
6087 break;
6088 default:
6089 AssertFailed();
6090 }
6091}
6092
6093
6094/**
6095 * Sets the default operand size to 64-bit and recalculates the effective
6096 * operand size.
6097 *
6098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6099 */
6100IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6101{
6102 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6103 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6104 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6105 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6106 else
6107 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6108}
6109
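/*
 * Note (added for clarity): this helper is typically used by opcodes whose
 * operand size defaults to 64-bit in long mode (near branches, PUSH/POP and
 * the like).  For these only a lone 66h prefix drops the effective size to
 * 16-bit; REX.W on its own is redundant, and REX.W combined with 66h stays
 * 64-bit, which is exactly what the prefix test above implements.
 */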
6110
6111/*
6112 *
6113 * Common opcode decoders.
6114 * Common opcode decoders.
6115 * Common opcode decoders.
6116 *
6117 */
6118//#include <iprt/mem.h>
6119
6120/**
6121 * Used to add extra details about a stub case.
6122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6123 */
6124IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6125{
6126#if defined(LOG_ENABLED) && defined(IN_RING3)
6127 PVM pVM = pVCpu->CTX_SUFF(pVM);
6128 char szRegs[4096];
6129 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6130 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6131 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6132 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6133 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6134 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6135 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6136 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6137 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6138 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6139 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6140 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6141 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6142 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6143 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6144 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6145 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6146 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6147 " efer=%016VR{efer}\n"
6148 " pat=%016VR{pat}\n"
6149 " sf_mask=%016VR{sf_mask}\n"
6150 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6151 " lstar=%016VR{lstar}\n"
6152 " star=%016VR{star} cstar=%016VR{cstar}\n"
6153 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6154 );
6155
6156 char szInstr[256];
6157 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6158 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6159 szInstr, sizeof(szInstr), NULL);
6160
6161 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6162#else
6163 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6164#endif
6165}
6166
6167/**
6168 * Complains about a stub.
6169 *
6170 * Providing two versions of this macro, one for daily use and one for use when
6171 * working on IEM.
6172 */
6173#if 0
6174# define IEMOP_BITCH_ABOUT_STUB() \
6175 do { \
6176 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6177 iemOpStubMsg2(pVCpu); \
6178 RTAssertPanic(); \
6179 } while (0)
6180#else
6181# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6182#endif
6183
6184/** Stubs an opcode. */
6185#define FNIEMOP_STUB(a_Name) \
6186 FNIEMOP_DEF(a_Name) \
6187 { \
6188 RT_NOREF_PV(pVCpu); \
6189 IEMOP_BITCH_ABOUT_STUB(); \
6190 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6191 } \
6192 typedef int ignore_semicolon
6193
6194/** Stubs an opcode. */
6195#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6196 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6197 { \
6198 RT_NOREF_PV(pVCpu); \
6199 RT_NOREF_PV(a_Name0); \
6200 IEMOP_BITCH_ABOUT_STUB(); \
6201 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6202 } \
6203 typedef int ignore_semicolon
6204
6205/** Stubs an opcode which currently should raise \#UD. */
6206#define FNIEMOP_UD_STUB(a_Name) \
6207 FNIEMOP_DEF(a_Name) \
6208 { \
6209 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6210 return IEMOP_RAISE_INVALID_OPCODE(); \
6211 } \
6212 typedef int ignore_semicolon
6213
6214/** Stubs an opcode which currently should raise \#UD. */
6215#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6216 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6217 { \
6218 RT_NOREF_PV(pVCpu); \
6219 RT_NOREF_PV(a_Name0); \
6220 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6221 return IEMOP_RAISE_INVALID_OPCODE(); \
6222 } \
6223 typedef int ignore_semicolon
6224
6225
6226
6227/** @name Register Access.
6228 * @{
6229 */
6230
6231/**
6232 * Gets a reference (pointer) to the specified hidden segment register.
6233 *
6234 * @returns Hidden register reference.
6235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6236 * @param iSegReg The segment register.
6237 */
6238IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6239{
6240 Assert(iSegReg < X86_SREG_COUNT);
6241 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6242 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6243
6244#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6245 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6246 { /* likely */ }
6247 else
6248 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6249#else
6250 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6251#endif
6252 return pSReg;
6253}
6254
6255
6256/**
6257 * Ensures that the given hidden segment register is up to date.
6258 *
6259 * @returns Hidden register reference.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param pSReg The segment register.
6262 */
6263IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6264{
6265#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6266 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6267 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6268#else
6269 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6270 NOREF(pVCpu);
6271#endif
6272 return pSReg;
6273}
6274
6275
6276/**
6277 * Gets a reference (pointer) to the specified segment register (the selector
6278 * value).
6279 *
6280 * @returns Pointer to the selector variable.
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 * @param iSegReg The segment register.
6283 */
6284DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6285{
6286 Assert(iSegReg < X86_SREG_COUNT);
6287 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6288 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6289}
6290
6291
6292/**
6293 * Fetches the selector value of a segment register.
6294 *
6295 * @returns The selector value.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iSegReg The segment register.
6298 */
6299DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6300{
6301 Assert(iSegReg < X86_SREG_COUNT);
6302 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6303 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6304}
6305
6306
6307/**
6308 * Fetches the base address value of a segment register.
6309 *
6310 * @returns The segment base address.
6311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6312 * @param iSegReg The segment register.
6313 */
6314DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6315{
6316 Assert(iSegReg < X86_SREG_COUNT);
6317 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6318 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6319}
6320
6321
6322/**
6323 * Gets a reference (pointer) to the specified general purpose register.
6324 *
6325 * @returns Register reference.
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param iReg The general purpose register.
6328 */
6329DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6330{
6331 Assert(iReg < 16);
6332 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6333}
6334
6335
6336/**
6337 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6338 *
6339 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6340 *
6341 * @returns Register reference.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param iReg The register.
6344 */
6345DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6346{
6347 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6348 {
6349 Assert(iReg < 16);
6350 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6351 }
6352 /* high 8-bit register. */
6353 Assert(iReg < 8);
6354 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6355}
6356
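/*
 * Illustrative example: with no REX prefix, encodings 4-7 select the legacy
 * high-byte registers, so iemGRegRefU8(pVCpu, 7) returns &BH (bits 15:8 of
 * rBX).  Once any REX prefix is present the same encodings select SPL, BPL,
 * SIL and DIL, and encodings 8-15 reach R8B-R15B.
 */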
6357
6358/**
6359 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6360 *
6361 * @returns Register reference.
6362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6363 * @param iReg The register.
6364 */
6365DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6366{
6367 Assert(iReg < 16);
6368 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6369}
6370
6371
6372/**
6373 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6374 *
6375 * @returns Register reference.
6376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6377 * @param iReg The register.
6378 */
6379DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6380{
6381 Assert(iReg < 16);
6382 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6383}
6384
6385
6386/**
6387 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6388 *
6389 * @returns Register reference.
6390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6391 * @param iReg The register.
6392 */
6393DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6394{
6395 Assert(iReg < 16);
6396 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6397}
6398
6399
6400/**
6401 * Gets a reference (pointer) to the specified segment register's base address.
6402 *
6403 * @returns Segment register base address reference.
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param iSegReg The segment selector.
6406 */
6407DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6408{
6409 Assert(iSegReg < X86_SREG_COUNT);
6410 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6411 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6412}
6413
6414
6415/**
6416 * Fetches the value of an 8-bit general purpose register.
6417 *
6418 * @returns The register value.
6419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6420 * @param iReg The register.
6421 */
6422DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6423{
6424 return *iemGRegRefU8(pVCpu, iReg);
6425}
6426
6427
6428/**
6429 * Fetches the value of a 16-bit general purpose register.
6430 *
6431 * @returns The register value.
6432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6433 * @param iReg The register.
6434 */
6435DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6436{
6437 Assert(iReg < 16);
6438 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6439}
6440
6441
6442/**
6443 * Fetches the value of a 32-bit general purpose register.
6444 *
6445 * @returns The register value.
6446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6447 * @param iReg The register.
6448 */
6449DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6450{
6451 Assert(iReg < 16);
6452 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6453}
6454
6455
6456/**
6457 * Fetches the value of a 64-bit general purpose register.
6458 *
6459 * @returns The register value.
6460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6461 * @param iReg The register.
6462 */
6463DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6464{
6465 Assert(iReg < 16);
6466 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6467}
6468
6469
6470/**
6471 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6472 *
6473 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6474 * segment limit.
6475 *
6476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6477 * @param offNextInstr The offset of the next instruction.
6478 */
6479IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6480{
6481 switch (pVCpu->iem.s.enmEffOpSize)
6482 {
6483 case IEMMODE_16BIT:
6484 {
6485 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6486 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6487 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6488 return iemRaiseGeneralProtectionFault0(pVCpu);
6489 pVCpu->cpum.GstCtx.rip = uNewIp;
6490 break;
6491 }
6492
6493 case IEMMODE_32BIT:
6494 {
6495 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6496 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6497
6498 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6499 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6500 return iemRaiseGeneralProtectionFault0(pVCpu);
6501 pVCpu->cpum.GstCtx.rip = uNewEip;
6502 break;
6503 }
6504
6505 case IEMMODE_64BIT:
6506 {
6507 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6508
6509 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6510 if (!IEM_IS_CANONICAL(uNewRip))
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 pVCpu->cpum.GstCtx.rip = uNewRip;
6513 break;
6514 }
6515
6516 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6517 }
6518
6519 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6520
6521#ifndef IEM_WITH_CODE_TLB
6522 /* Flush the prefetch buffer. */
6523 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6524#endif
6525
6526 return VINF_SUCCESS;
6527}
6528
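/*
 * Illustrative example for the 16-bit case above: the target is computed in a
 * uint16_t, so IP wraps at 64K.  A 2-byte JMP with rel8=+0x10 at IP=0xFFFE
 * ends up at 0x0010 (0xFFFE + 2 + 0x10 truncated to 16 bits), which is then
 * checked against the CS limit like any other target.
 */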
6529
6530/**
6531 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6532 *
6533 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6534 * segment limit.
6535 *
6536 * @returns Strict VBox status code.
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param offNextInstr The offset of the next instruction.
6539 */
6540IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6541{
6542 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6543
6544 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6545 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6546 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6547 return iemRaiseGeneralProtectionFault0(pVCpu);
6548 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6549 pVCpu->cpum.GstCtx.rip = uNewIp;
6550 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6551
6552#ifndef IEM_WITH_CODE_TLB
6553 /* Flush the prefetch buffer. */
6554 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6555#endif
6556
6557 return VINF_SUCCESS;
6558}
6559
6560
6561/**
6562 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6563 *
6564 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6565 * segment limit.
6566 *
6567 * @returns Strict VBox status code.
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 * @param offNextInstr The offset of the next instruction.
6570 */
6571IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6572{
6573 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6574
6575 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6576 {
6577 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6578
6579 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6580 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6581 return iemRaiseGeneralProtectionFault0(pVCpu);
6582 pVCpu->cpum.GstCtx.rip = uNewEip;
6583 }
6584 else
6585 {
6586 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6587
6588 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6589 if (!IEM_IS_CANONICAL(uNewRip))
6590 return iemRaiseGeneralProtectionFault0(pVCpu);
6591 pVCpu->cpum.GstCtx.rip = uNewRip;
6592 }
6593 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6594
6595#ifndef IEM_WITH_CODE_TLB
6596 /* Flush the prefetch buffer. */
6597 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6598#endif
6599
6600 return VINF_SUCCESS;
6601}
6602
6603
6604/**
6605 * Performs a near jump to the specified address.
6606 *
6607 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6608 * segment limit.
6609 *
6610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6611 * @param uNewRip The new RIP value.
6612 */
6613IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6614{
6615 switch (pVCpu->iem.s.enmEffOpSize)
6616 {
6617 case IEMMODE_16BIT:
6618 {
6619 Assert(uNewRip <= UINT16_MAX);
6620 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6621 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6622 return iemRaiseGeneralProtectionFault0(pVCpu);
6623 /** @todo Test 16-bit jump in 64-bit mode. */
6624 pVCpu->cpum.GstCtx.rip = uNewRip;
6625 break;
6626 }
6627
6628 case IEMMODE_32BIT:
6629 {
6630 Assert(uNewRip <= UINT32_MAX);
6631 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6632 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6633
6634 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6635 return iemRaiseGeneralProtectionFault0(pVCpu);
6636 pVCpu->cpum.GstCtx.rip = uNewRip;
6637 break;
6638 }
6639
6640 case IEMMODE_64BIT:
6641 {
6642 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6643
6644 if (!IEM_IS_CANONICAL(uNewRip))
6645 return iemRaiseGeneralProtectionFault0(pVCpu);
6646 pVCpu->cpum.GstCtx.rip = uNewRip;
6647 break;
6648 }
6649
6650 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6651 }
6652
6653 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6654
6655#ifndef IEM_WITH_CODE_TLB
6656 /* Flush the prefetch buffer. */
6657 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6658#endif
6659
6660 return VINF_SUCCESS;
6661}
6662
6663
6664/**
6665 * Get the address of the top of the stack.
6666 *
6667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6668 */
6669DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6670{
6671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6672 return pVCpu->cpum.GstCtx.rsp;
6673 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6674 return pVCpu->cpum.GstCtx.esp;
6675 return pVCpu->cpum.GstCtx.sp;
6676}
6677
6678
6679/**
6680 * Updates the RIP/EIP/IP to point to the next instruction.
6681 *
6682 * This function leaves the EFLAGS.RF flag alone.
6683 *
6684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6685 * @param cbInstr The number of bytes to add.
6686 */
6687IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6688{
6689 switch (pVCpu->iem.s.enmCpuMode)
6690 {
6691 case IEMMODE_16BIT:
6692 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6693 pVCpu->cpum.GstCtx.eip += cbInstr;
6694 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6695 break;
6696
6697 case IEMMODE_32BIT:
6698 pVCpu->cpum.GstCtx.eip += cbInstr;
6699 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6700 break;
6701
6702 case IEMMODE_64BIT:
6703 pVCpu->cpum.GstCtx.rip += cbInstr;
6704 break;
6705 default: AssertFailed();
6706 }
6707}
6708
6709
6710#if 0
6711/**
6712 * Updates the RIP/EIP/IP to point to the next instruction.
6713 *
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 */
6716IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6717{
6718 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6719}
6720#endif
6721
6722
6723
6724/**
6725 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6726 *
6727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6728 * @param cbInstr The number of bytes to add.
6729 */
6730IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6731{
6732 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6733
6734 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6735#if ARCH_BITS >= 64
6736 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6737 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6738 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6739#else
6740 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6741 pVCpu->cpum.GstCtx.rip += cbInstr;
6742 else
6743 pVCpu->cpum.GstCtx.eip += cbInstr;
6744#endif
6745}
6746
6747
6748/**
6749 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6750 *
6751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6752 */
6753IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6754{
6755 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6756}
6757
6758
6759/**
6760 * Adds to the stack pointer.
6761 *
6762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6763 * @param cbToAdd The number of bytes to add (8-bit!).
6764 */
6765DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6766{
6767 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6768 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6769 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6770 pVCpu->cpum.GstCtx.esp += cbToAdd;
6771 else
6772 pVCpu->cpum.GstCtx.sp += cbToAdd;
6773}
6774
6775
6776/**
6777 * Subtracts from the stack pointer.
6778 *
6779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6780 * @param cbToSub The number of bytes to subtract (8-bit!).
6781 */
6782DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6783{
6784 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6785 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6786 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6787 pVCpu->cpum.GstCtx.esp -= cbToSub;
6788 else
6789 pVCpu->cpum.GstCtx.sp -= cbToSub;
6790}
6791
6792
6793/**
6794 * Adds to the temporary stack pointer.
6795 *
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6798 * @param cbToAdd The number of bytes to add (16-bit).
6799 */
6800DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6801{
6802 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6803 pTmpRsp->u += cbToAdd;
6804 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6805 pTmpRsp->DWords.dw0 += cbToAdd;
6806 else
6807 pTmpRsp->Words.w0 += cbToAdd;
6808}
6809
6810
6811/**
6812 * Subtracts from the temporary stack pointer.
6813 *
6814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6815 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6816 * @param cbToSub The number of bytes to subtract.
6817 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6818 * expecting that.
6819 */
6820DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6821{
6822 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6823 pTmpRsp->u -= cbToSub;
6824 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6825 pTmpRsp->DWords.dw0 -= cbToSub;
6826 else
6827 pTmpRsp->Words.w0 -= cbToSub;
6828}
6829
6830
6831/**
6832 * Calculates the effective stack address for a push of the specified size as
6833 * well as the new RSP value (upper bits may be masked).
6834 *
6835 * @returns Effective stack address for the push.
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 * @param cbItem The size of the stack item to push.
6838 * @param puNewRsp Where to return the new RSP value.
6839 */
6840DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6841{
6842 RTUINT64U uTmpRsp;
6843 RTGCPTR GCPtrTop;
6844 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6845
6846 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6847 GCPtrTop = uTmpRsp.u -= cbItem;
6848 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6849 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6850 else
6851 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6852 *puNewRsp = uTmpRsp.u;
6853 return GCPtrTop;
6854}
6855
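/*
 * Illustrative example for the push helper above: with a 16-bit SS
 * (ss.Attr.n.u1DefBig clear) and RSP=0x00001002, pushing a 4-byte item yields
 * GCPtrTop=0x0FFE and a new RSP whose low word is 0x0FFE with the upper bits
 * left untouched; in 64-bit mode the full 64-bit RSP is decremented instead.
 */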
6856
6857/**
6858 * Gets the current stack pointer and calculates the value after a pop of the
6859 * specified size.
6860 *
6861 * @returns Current stack pointer.
6862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6863 * @param cbItem The size of the stack item to pop.
6864 * @param puNewRsp Where to return the new RSP value.
6865 */
6866DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6867{
6868 RTUINT64U uTmpRsp;
6869 RTGCPTR GCPtrTop;
6870 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6871
6872 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6873 {
6874 GCPtrTop = uTmpRsp.u;
6875 uTmpRsp.u += cbItem;
6876 }
6877 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6878 {
6879 GCPtrTop = uTmpRsp.DWords.dw0;
6880 uTmpRsp.DWords.dw0 += cbItem;
6881 }
6882 else
6883 {
6884 GCPtrTop = uTmpRsp.Words.w0;
6885 uTmpRsp.Words.w0 += cbItem;
6886 }
6887 *puNewRsp = uTmpRsp.u;
6888 return GCPtrTop;
6889}
6890
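/*
 * Note (added for clarity): unlike the push helper further up, which returns
 * the post-decrement address the new item is written to, the pop helper above
 * returns the current top-of-stack address (where the item being popped
 * lives) and only reports the incremented stack pointer through puNewRsp.
 */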
6891
6892/**
6893 * Calculates the effective stack address for a push of the specified size as
6894 * well as the new temporary RSP value (upper bits may be masked).
6895 *
6896 * @returns Effective stack address for the push.
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 * @param pTmpRsp The temporary stack pointer. This is updated.
6899 * @param cbItem The size of the stack item to push.
6900 */
6901DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6902{
6903 RTGCPTR GCPtrTop;
6904
6905 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6906 GCPtrTop = pTmpRsp->u -= cbItem;
6907 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6908 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6909 else
6910 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6911 return GCPtrTop;
6912}
6913
6914
6915/**
6916 * Gets the effective stack address for a pop of the specified size and
6917 * calculates and updates the temporary RSP.
6918 *
6919 * @returns Current stack pointer.
6920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6921 * @param pTmpRsp The temporary stack pointer. This is updated.
6922 * @param cbItem The size of the stack item to pop.
6923 */
6924DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6925{
6926 RTGCPTR GCPtrTop;
6927 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6928 {
6929 GCPtrTop = pTmpRsp->u;
6930 pTmpRsp->u += cbItem;
6931 }
6932 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6933 {
6934 GCPtrTop = pTmpRsp->DWords.dw0;
6935 pTmpRsp->DWords.dw0 += cbItem;
6936 }
6937 else
6938 {
6939 GCPtrTop = pTmpRsp->Words.w0;
6940 pTmpRsp->Words.w0 += cbItem;
6941 }
6942 return GCPtrTop;
6943}
6944
6945/** @} */
6946
6947
6948/** @name FPU access and helpers.
6949 *
6950 * @{
6951 */
6952
6953
6954/**
6955 * Hook for preparing to use the host FPU.
6956 *
6957 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6960 */
6961DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6962{
6963#ifdef IN_RING3
6964 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6965#else
6966 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6967#endif
6968 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6969}
6970
6971
6972/**
6973 * Hook for preparing to use the host FPU for SSE.
6974 *
6975 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6976 *
6977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6978 */
6979DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6980{
6981 iemFpuPrepareUsage(pVCpu);
6982}
6983
6984
6985/**
6986 * Hook for preparing to use the host FPU for AVX.
6987 *
6988 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6989 *
6990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6991 */
6992DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6993{
6994 iemFpuPrepareUsage(pVCpu);
6995}
6996
6997
6998/**
6999 * Hook for actualizing the guest FPU state before the interpreter reads it.
7000 *
7001 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7002 *
7003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7004 */
7005DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7006{
7007#ifdef IN_RING3
7008 NOREF(pVCpu);
7009#else
7010 CPUMRZFpuStateActualizeForRead(pVCpu);
7011#endif
7012 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7013}
7014
7015
7016/**
7017 * Hook for actualizing the guest FPU state before the interpreter changes it.
7018 *
7019 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7020 *
7021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7022 */
7023DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7024{
7025#ifdef IN_RING3
7026 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7027#else
7028 CPUMRZFpuStateActualizeForChange(pVCpu);
7029#endif
7030 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7031}
7032
7033
7034/**
7035 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7036 * only.
7037 *
7038 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7039 *
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 */
7042DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7043{
7044#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7045 NOREF(pVCpu);
7046#else
7047 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7048#endif
7049 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7050}
7051
7052
7053/**
7054 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7055 * read+write.
7056 *
7057 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7058 *
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 */
7061DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7062{
7063#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7064 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7065#else
7066 CPUMRZFpuStateActualizeForChange(pVCpu);
7067#endif
7068 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7069}
7070
7071
7072/**
7073 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7074 * only.
7075 *
7076 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7077 *
7078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7079 */
7080DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7081{
7082#ifdef IN_RING3
7083 NOREF(pVCpu);
7084#else
7085 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7086#endif
7087 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7088}
7089
7090
7091/**
7092 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7093 * read+write.
7094 *
7095 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7096 *
7097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7098 */
7099DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7100{
7101#ifdef IN_RING3
7102 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7103#else
7104 CPUMRZFpuStateActualizeForChange(pVCpu);
7105#endif
7106 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7107}
7108
7109
7110/**
7111 * Stores a QNaN value into a FPU register.
7112 *
7113 * @param pReg Pointer to the register.
7114 */
7115DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7116{
7117 pReg->au32[0] = UINT32_C(0x00000000);
7118 pReg->au32[1] = UINT32_C(0xc0000000);
7119 pReg->au16[4] = UINT16_C(0xffff);
7120}
7121
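/*
 * Note (added for clarity): the pattern stored above is the x87 "real
 * indefinite" QNaN: sign and exponent bits all set (au16[4] = 0xffff),
 * integer bit and top fraction bit set (au32[1] = 0xc0000000), remaining
 * fraction bits zero, i.e. the 80-bit value FFFF'C000'0000'0000'0000.
 */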
7122
7123/**
7124 * Updates the FOP, FPU.CS and FPUIP registers.
7125 *
7126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7127 * @param pFpuCtx The FPU context.
7128 */
7129DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7130{
7131 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7132 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7133 /** @todo x87.CS and FPUIP need to be kept separately. */
7134 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7135 {
7136 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7137 * happens in real mode here based on the fnsave and fnstenv images. */
7138 pFpuCtx->CS = 0;
7139 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7140 }
7141 else
7142 {
7143 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7144 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7145 }
7146}
7147
7148
7149/**
7150 * Updates the x87.DS and FPUDP registers.
7151 *
7152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7153 * @param pFpuCtx The FPU context.
7154 * @param iEffSeg The effective segment register.
7155 * @param GCPtrEff The effective address relative to @a iEffSeg.
7156 */
7157DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7158{
7159 RTSEL sel;
7160 switch (iEffSeg)
7161 {
7162 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7163 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7164 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7165 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7166 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7167 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7168 default:
7169 AssertMsgFailed(("%d\n", iEffSeg));
7170 sel = pVCpu->cpum.GstCtx.ds.Sel;
7171 }
7172 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7173 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7174 {
7175 pFpuCtx->DS = 0;
7176 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7177 }
7178 else
7179 {
7180 pFpuCtx->DS = sel;
7181 pFpuCtx->FPUDP = GCPtrEff;
7182 }
7183}
7184
7185
7186/**
7187 * Rotates the stack registers in the push direction.
7188 *
7189 * @param pFpuCtx The FPU context.
7190 * @remarks This is a complete waste of time, but fxsave stores the registers in
7191 * stack order.
7192 */
7193DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7194{
7195 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7196 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7197 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7198 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7199 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7200 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7201 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7202 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7203 pFpuCtx->aRegs[0].r80 = r80Tmp;
7204}
7205
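/*
 * Note (added for clarity): aRegs[] is kept in ST-relative order, i.e.
 * aRegs[0] always mirrors ST(0).  After TOP is decremented for a push the old
 * ST(0) must become the new ST(1), which is what the rotation above (and its
 * pop counterpart below) maintains.
 */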
7206
7207/**
7208 * Rotates the stack registers in the pop direction.
7209 *
7210 * @param pFpuCtx The FPU context.
7211 * @remarks This is a complete waste of time, but fxsave stores the registers in
7212 * stack order.
7213 */
7214DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7215{
7216 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7217 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7218 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7219 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7220 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7221 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7222 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7223 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7224 pFpuCtx->aRegs[7].r80 = r80Tmp;
7225}
7226
7227
7228/**
7229 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7230 * exception prevents it.
7231 *
7232 * @param pResult The FPU operation result to push.
7233 * @param pFpuCtx The FPU context.
7234 */
7235IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7236{
7237 /* Update FSW and bail if there are pending exceptions afterwards. */
7238 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7239 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7240 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7241 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7242 {
7243 pFpuCtx->FSW = fFsw;
7244 return;
7245 }
7246
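    /* The slot the push lands in: the new TOP, i.e. old TOP - 1 modulo 8 (adding 7 equals subtracting 1 in the 3-bit field). */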
7247 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7248 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7249 {
7250 /* All is fine, push the actual value. */
7251 pFpuCtx->FTW |= RT_BIT(iNewTop);
7252 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7253 }
7254 else if (pFpuCtx->FCW & X86_FCW_IM)
7255 {
7256 /* Masked stack overflow, push QNaN. */
7257 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7258 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7259 }
7260 else
7261 {
7262 /* Raise stack overflow, don't push anything. */
7263 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7264 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7265 return;
7266 }
7267
7268 fFsw &= ~X86_FSW_TOP_MASK;
7269 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7270 pFpuCtx->FSW = fFsw;
7271
7272 iemFpuRotateStackPush(pFpuCtx);
7273}
7274
7275
7276/**
7277 * Stores a result in a FPU register and updates the FSW and FTW.
7278 *
7279 * @param pFpuCtx The FPU context.
7280 * @param pResult The result to store.
7281 * @param iStReg Which FPU register to store it in.
7282 */
7283IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7284{
7285 Assert(iStReg < 8);
7286 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7287 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7288 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7289 pFpuCtx->FTW |= RT_BIT(iReg);
7290 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7291}
7292
7293
7294/**
7295 * Only updates the FPU status word (FSW) with the result of the current
7296 * instruction.
7297 *
7298 * @param pFpuCtx The FPU context.
7299 * @param u16FSW The FSW output of the current instruction.
7300 */
7301IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7302{
7303 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7304 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7305}
7306
7307
7308/**
7309 * Pops one item off the FPU stack if no pending exception prevents it.
7310 *
7311 * @param pFpuCtx The FPU context.
7312 */
7313IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7314{
7315 /* Check pending exceptions. */
7316 uint16_t uFSW = pFpuCtx->FSW;
7317 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7318 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7319 return;
7320
7321 /* TOP++: adding 9 modulo 8 advances the 3-bit TOP field by one (popping increments TOP). */
7322 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7323 uFSW &= ~X86_FSW_TOP_MASK;
7324 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7325 pFpuCtx->FSW = uFSW;
7326
7327 /* Mark the previous ST0 as empty. */
7328 iOldTop >>= X86_FSW_TOP_SHIFT;
7329 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7330
7331 /* Rotate the registers. */
7332 iemFpuRotateStackPop(pFpuCtx);
7333}
7334
7335
7336/**
7337 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7338 *
7339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7340 * @param pResult The FPU operation result to push.
7341 */
7342IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7343{
7344 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7345 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7346 iemFpuMaybePushResult(pResult, pFpuCtx);
7347}
7348
7349
7350/**
7351 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7352 * and sets FPUDP and FPUDS.
7353 *
7354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7355 * @param pResult The FPU operation result to push.
7356 * @param iEffSeg The effective segment register.
7357 * @param GCPtrEff The effective address relative to @a iEffSeg.
7358 */
7359IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7360{
7361 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7362 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7363 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7364 iemFpuMaybePushResult(pResult, pFpuCtx);
7365}
7366
7367
7368/**
7369 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7370 * unless a pending exception prevents it.
7371 *
7372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7373 * @param pResult The FPU operation result to store and push.
7374 */
7375IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7376{
7377 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7378 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7379
7380 /* Update FSW and bail if there are pending exceptions afterwards. */
7381 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7382 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7383 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7384 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7385 {
7386 pFpuCtx->FSW = fFsw;
7387 return;
7388 }
7389
7390 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7391 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7392 {
7393 /* All is fine, push the actual value. */
7394 pFpuCtx->FTW |= RT_BIT(iNewTop);
7395 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7396 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7397 }
7398 else if (pFpuCtx->FCW & X86_FCW_IM)
7399 {
7400 /* Masked stack overflow, push QNaN. */
7401 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7402 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7403 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7404 }
7405 else
7406 {
7407 /* Raise stack overflow, don't push anything. */
7408 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7409 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7410 return;
7411 }
7412
7413 fFsw &= ~X86_FSW_TOP_MASK;
7414 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7415 pFpuCtx->FSW = fFsw;
7416
7417 iemFpuRotateStackPush(pFpuCtx);
7418}
7419
7420
7421/**
7422 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7423 * FOP.
7424 *
7425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7426 * @param pResult The result to store.
7427 * @param iStReg Which FPU register to store it in.
7428 */
7429IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7430{
7431 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7432 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7433 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7434}
7435
7436
7437/**
7438 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7439 * FOP, and then pops the stack.
7440 *
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param pResult The result to store.
7443 * @param iStReg Which FPU register to store it in.
7444 */
7445IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7446{
7447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7448 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7449 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7450 iemFpuMaybePopOne(pFpuCtx);
7451}
7452
7453
7454/**
7455 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7456 * FPUDP, and FPUDS.
7457 *
7458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7459 * @param pResult The result to store.
7460 * @param iStReg Which FPU register to store it in.
7461 * @param iEffSeg The effective memory operand selector register.
7462 * @param GCPtrEff The effective memory operand offset.
7463 */
7464IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7465 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7466{
7467 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7468 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7470 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7471}
7472
7473
7474/**
7475 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7476 * FPUDP, and FPUDS, and then pops the stack.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param pResult The result to store.
7480 * @param iStReg Which FPU register to store it in.
7481 * @param iEffSeg The effective memory operand selector register.
7482 * @param GCPtrEff The effective memory operand offset.
7483 */
7484IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7485 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7486{
7487 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7488 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7489 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7490 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7491 iemFpuMaybePopOne(pFpuCtx);
7492}
7493
7494
7495/**
7496 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7497 *
7498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7499 */
7500IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7501{
7502 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7504}
7505
7506
7507/**
7508 * Marks the specified stack register as free (for FFREE).
7509 *
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param iStReg The register to free.
7512 */
7513IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7514{
7515 Assert(iStReg < 8);
7516 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7517 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7518 pFpuCtx->FTW &= ~RT_BIT(iReg);
7519}
7520
7521
7522/**
7523 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7524 *
7525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7526 */
7527IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7528{
7529 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7530 uint16_t uFsw = pFpuCtx->FSW;
7531 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7532 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7533 uFsw &= ~X86_FSW_TOP_MASK;
7534 uFsw |= uTop;
7535 pFpuCtx->FSW = uFsw;
7536}
7537
7538
7539/**
7540 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7541 *
7542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7543 */
7544IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7545{
7546 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7547 uint16_t uFsw = pFpuCtx->FSW;
7548 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7549 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7550 uFsw &= ~X86_FSW_TOP_MASK;
7551 uFsw |= uTop;
7552 pFpuCtx->FSW = uFsw;
7553}
7554
7555
7556/**
7557 * Updates the FSW, FOP, FPUIP, and FPUCS.
7558 *
7559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7560 * @param u16FSW The FSW from the current instruction.
7561 */
7562IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7563{
7564 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7565 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7566 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7567}
7568
7569
7570/**
7571 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7572 *
7573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7574 * @param u16FSW The FSW from the current instruction.
7575 */
7576IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7577{
7578 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7579 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7580 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7581 iemFpuMaybePopOne(pFpuCtx);
7582}
7583
7584
7585/**
7586 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7587 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param u16FSW The FSW from the current instruction.
7590 * @param iEffSeg The effective memory operand selector register.
7591 * @param GCPtrEff The effective memory operand offset.
7592 */
7593IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7594{
7595 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7597 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7598 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7599}
7600
7601
7602/**
7603 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7604 *
7605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7606 * @param u16FSW The FSW from the current instruction.
7607 */
7608IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7609{
7610 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7611 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7612 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7613 iemFpuMaybePopOne(pFpuCtx);
7614 iemFpuMaybePopOne(pFpuCtx);
7615}
7616
7617
7618/**
7619 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7620 *
7621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7622 * @param u16FSW The FSW from the current instruction.
7623 * @param iEffSeg The effective memory operand selector register.
7624 * @param GCPtrEff The effective memory operand offset.
7625 */
7626IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7627{
7628 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7629 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7630 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7631 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7632 iemFpuMaybePopOne(pFpuCtx);
7633}
7634
7635
7636/**
7637 * Worker routine for raising an FPU stack underflow exception.
7638 *
7639 * @param pFpuCtx The FPU context.
7640 * @param iStReg The stack register being accessed.
7641 */
7642IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7643{
7644 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7645 if (pFpuCtx->FCW & X86_FCW_IM)
7646 {
7647 /* Masked underflow. */
7648 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7649 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7650 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7651 if (iStReg != UINT8_MAX)
7652 {
7653 pFpuCtx->FTW |= RT_BIT(iReg);
7654 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7655 }
7656 }
7657 else
7658 {
7659 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7660 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7661 }
7662}
7663
7664
7665/**
7666 * Raises a FPU stack underflow exception.
7667 *
7668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7669 * @param iStReg The destination register that should be loaded
7670 * with QNaN if \#IS is masked. Specify
7671 * UINT8_MAX if none (like for fcom).
7672 */
7673DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7674{
7675 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7676 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7677 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7678}
7679
7680
7681DECL_NO_INLINE(IEM_STATIC, void)
7682iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7683{
7684 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7685 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7686 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7687 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7688}
7689
7690
7691DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7692{
7693 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7694 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7695 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7696 iemFpuMaybePopOne(pFpuCtx);
7697}
7698
7699
7700DECL_NO_INLINE(IEM_STATIC, void)
7701iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7702{
7703 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7704 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7705 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7706 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7707 iemFpuMaybePopOne(pFpuCtx);
7708}
7709
7710
7711DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7712{
7713 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7714 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7715 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7716 iemFpuMaybePopOne(pFpuCtx);
7717 iemFpuMaybePopOne(pFpuCtx);
7718}
7719
7720
7721DECL_NO_INLINE(IEM_STATIC, void)
7722iemFpuStackPushUnderflow(PVMCPU pVCpu)
7723{
7724 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7725 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7726
7727 if (pFpuCtx->FCW & X86_FCW_IM)
7728 {
7729 /* Masked underflow - Push QNaN. */
7730 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7731 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7732 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7733 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7734 pFpuCtx->FTW |= RT_BIT(iNewTop);
7735 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7736 iemFpuRotateStackPush(pFpuCtx);
7737 }
7738 else
7739 {
7740 /* Exception pending - don't change TOP or the register stack. */
7741 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7742 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7743 }
7744}
7745
7746
7747DECL_NO_INLINE(IEM_STATIC, void)
7748iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7749{
7750 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7752
7753 if (pFpuCtx->FCW & X86_FCW_IM)
7754 {
7755        /* Masked underflow - Push QNaN. */
7756 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7757 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7758 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7759 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7760 pFpuCtx->FTW |= RT_BIT(iNewTop);
7761 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7762 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7763 iemFpuRotateStackPush(pFpuCtx);
7764 }
7765 else
7766 {
7767 /* Exception pending - don't change TOP or the register stack. */
7768 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7769 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7770 }
7771}
7772
7773
7774/**
7775 * Worker routine for raising an FPU stack overflow exception on a push.
7776 *
7777 * @param pFpuCtx The FPU context.
7778 */
7779IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7780{
7781 if (pFpuCtx->FCW & X86_FCW_IM)
7782 {
7783 /* Masked overflow. */
7784 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7785 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7786 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7787 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7788 pFpuCtx->FTW |= RT_BIT(iNewTop);
7789 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7790 iemFpuRotateStackPush(pFpuCtx);
7791 }
7792 else
7793 {
7794 /* Exception pending - don't change TOP or the register stack. */
7795 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7796 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7797 }
7798}
7799
7800
7801/**
7802 * Raises an FPU stack overflow exception on a push.
7803 *
7804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7805 */
7806DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7807{
7808 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7809 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7810 iemFpuStackPushOverflowOnly(pFpuCtx);
7811}
7812
7813
7814/**
7815 * Raises an FPU stack overflow exception on a push with a memory operand.
7816 *
7817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7818 * @param iEffSeg The effective memory operand selector register.
7819 * @param GCPtrEff The effective memory operand offset.
7820 */
7821DECL_NO_INLINE(IEM_STATIC, void)
7822iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7823{
7824 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7825 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7826 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7827 iemFpuStackPushOverflowOnly(pFpuCtx);
7828}
7829
7830
7831IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7832{
7833 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7834 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7835 if (pFpuCtx->FTW & RT_BIT(iReg))
7836 return VINF_SUCCESS;
7837 return VERR_NOT_FOUND;
7838}
7839
7840
7841IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7842{
7843 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7844 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7845 if (pFpuCtx->FTW & RT_BIT(iReg))
7846 {
7847 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7848 return VINF_SUCCESS;
7849 }
7850 return VERR_NOT_FOUND;
7851}
7852
7853
7854IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7855 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7856{
7857 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7858 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7859 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7860 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7861 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7862 {
7863 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7864 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7865 return VINF_SUCCESS;
7866 }
7867 return VERR_NOT_FOUND;
7868}
7869
7870
7871IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7872{
7873 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7874 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7875 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7876 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7877 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7878 {
7879 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7880 return VINF_SUCCESS;
7881 }
7882 return VERR_NOT_FOUND;
7883}
7884
7885
7886/**
7887 * Updates the FPU exception status after FCW is changed.
7888 *
7889 * @param pFpuCtx The FPU context.
7890 */
7891IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7892{
7893 uint16_t u16Fsw = pFpuCtx->FSW;
7894 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7895 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7896 else
7897 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7898 pFpuCtx->FSW = u16Fsw;
7899}
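
/*
 * Example (editorial illustration only, not compiled): how the ES/B summary
 * bits follow from the FCW masks after iemFpuRecalcExceptionStatus.  Assumes
 * the usual iprt/x86.h bit definitions (X86_FSW_PE, X86_FCW_PM,
 * X86_FCW_MASK_ALL); the values are invented for this comment.
 *
 *      pFpuCtx->FSW = X86_FSW_PE;              // precision exception pending
 *      pFpuCtx->FCW = X86_FCW_MASK_ALL;        // ... but all exceptions masked
 *      iemFpuRecalcExceptionStatus(pFpuCtx);   // ES and B stay clear
 *      pFpuCtx->FCW &= ~X86_FCW_PM;            // unmask precision exceptions
 *      iemFpuRecalcExceptionStatus(pFpuCtx);   // ES and B are now set
 */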
7900
7901
7902/**
7903 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7904 *
7905 * @returns The full FTW.
7906 * @param pFpuCtx The FPU context.
7907 */
7908IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7909{
7910 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7911 uint16_t u16Ftw = 0;
7912 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7913 for (unsigned iSt = 0; iSt < 8; iSt++)
7914 {
7915 unsigned const iReg = (iSt + iTop) & 7;
7916 if (!(u8Ftw & RT_BIT(iReg)))
7917 u16Ftw |= 3 << (iReg * 2); /* empty */
7918 else
7919 {
7920 uint16_t uTag;
7921 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7922 if (pr80Reg->s.uExponent == 0x7fff)
7923 uTag = 2; /* Exponent is all 1's => Special. */
7924 else if (pr80Reg->s.uExponent == 0x0000)
7925 {
7926 if (pr80Reg->s.u64Mantissa == 0x0000)
7927 uTag = 1; /* All bits are zero => Zero. */
7928 else
7929 uTag = 2; /* Must be special. */
7930 }
7931 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7932 uTag = 0; /* Valid. */
7933 else
7934 uTag = 2; /* Must be special. */
7935
7936            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7937 }
7938 }
7939
7940 return u16Ftw;
7941}
7942
7943
7944/**
7945 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7946 *
7947 * @returns The compressed FTW.
7948 * @param u16FullFtw The full FTW to convert.
7949 */
7950IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7951{
7952 uint8_t u8Ftw = 0;
7953 for (unsigned i = 0; i < 8; i++)
7954 {
7955 if ((u16FullFtw & 3) != 3 /*empty*/)
7956 u8Ftw |= RT_BIT(i);
7957 u16FullFtw >>= 2;
7958 }
7959
7960 return u8Ftw;
7961}
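
/*
 * Example (editorial illustration only): the relationship between the
 * compressed one-bit-per-register FTW kept in X86FXSTATE and the full two-bit
 * tag word used by FNSTENV/FNSAVE.  The values are invented and assume TOP=0
 * with the two in-use registers holding ordinary normalized values.
 *
 *      uint8_t  const u8Ftw      = 0x03;     // regs 0 and 1 in use, rest empty
 *      uint16_t const u16FullFtw = 0xfff0;   // tags: 00b (valid) for 0..1, 11b (empty) for 2..7
 *      Assert(iemFpuCompressFtw(u16FullFtw) == u8Ftw);
 */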
7962
7963/** @} */
7964
7965
7966/** @name Memory access.
7967 *
7968 * @{
7969 */
7970
7971
7972/**
7973 * Updates the IEMCPU::cbWritten counter if applicable.
7974 *
7975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7976 * @param fAccess The access being accounted for.
7977 * @param cbMem The access size.
7978 */
7979DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7980{
7981 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7982 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7983 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7984}
7985
7986
7987/**
7988 * Checks if the given segment can be written to, raising the appropriate
7989 * exception if not.
7990 *
7991 * @returns VBox strict status code.
7992 *
7993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7994 * @param pHid Pointer to the hidden register.
7995 * @param iSegReg The register number.
7996 * @param pu64BaseAddr Where to return the base address to use for the
7997 * segment. (In 64-bit code it may differ from the
7998 * base in the hidden segment.)
7999 */
8000IEM_STATIC VBOXSTRICTRC
8001iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8002{
8003 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8004
8005 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8006 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8007 else
8008 {
8009 if (!pHid->Attr.n.u1Present)
8010 {
8011 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8012 AssertRelease(uSel == 0);
8013 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8014 return iemRaiseGeneralProtectionFault0(pVCpu);
8015 }
8016
8017 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8018 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8019 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8020 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8021 *pu64BaseAddr = pHid->u64Base;
8022 }
8023 return VINF_SUCCESS;
8024}
8025
8026
8027/**
8028 * Checks if the given segment can be read from, raising the appropriate
8029 * exception if not.
8030 *
8031 * @returns VBox strict status code.
8032 *
8033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8034 * @param pHid Pointer to the hidden register.
8035 * @param iSegReg The register number.
8036 * @param pu64BaseAddr Where to return the base address to use for the
8037 * segment. (In 64-bit code it may differ from the
8038 * base in the hidden segment.)
8039 */
8040IEM_STATIC VBOXSTRICTRC
8041iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8042{
8043 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8044
8045 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8046 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8047 else
8048 {
8049 if (!pHid->Attr.n.u1Present)
8050 {
8051 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8052 AssertRelease(uSel == 0);
8053 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8054 return iemRaiseGeneralProtectionFault0(pVCpu);
8055 }
8056
8057 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8058 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8059 *pu64BaseAddr = pHid->u64Base;
8060 }
8061 return VINF_SUCCESS;
8062}
8063
8064
8065/**
8066 * Applies the segment limit, base and attributes.
8067 *
8068 * This may raise a \#GP or \#SS.
8069 *
8070 * @returns VBox strict status code.
8071 *
8072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8073 * @param fAccess The kind of access which is being performed.
8074 * @param iSegReg The index of the segment register to apply.
8075 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8076 * TSS, ++).
8077 * @param cbMem The access size.
8078 * @param pGCPtrMem Pointer to the guest memory address to apply
8079 * segmentation to. Input and output parameter.
8080 */
8081IEM_STATIC VBOXSTRICTRC
8082iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8083{
8084 if (iSegReg == UINT8_MAX)
8085 return VINF_SUCCESS;
8086
8087 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8088 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8089 switch (pVCpu->iem.s.enmCpuMode)
8090 {
8091 case IEMMODE_16BIT:
8092 case IEMMODE_32BIT:
8093 {
8094 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8095 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8096
8097 if ( pSel->Attr.n.u1Present
8098 && !pSel->Attr.n.u1Unusable)
8099 {
8100 Assert(pSel->Attr.n.u1DescType);
8101 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8102 {
8103 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8104 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8105 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8106
8107 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8108 {
8109 /** @todo CPL check. */
8110 }
8111
8112 /*
8113 * There are two kinds of data selectors, normal and expand down.
8114 */
8115 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8116 {
8117 if ( GCPtrFirst32 > pSel->u32Limit
8118 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8119 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8120 }
8121 else
8122 {
8123 /*
8124 * The upper boundary is defined by the B bit, not the G bit!
8125 */
8126 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8127 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8128 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8129 }
8130 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8131 }
8132 else
8133 {
8134
8135 /*
8136              * Code selectors can usually be used to read through; writing is
8137 * only permitted in real and V8086 mode.
8138 */
8139 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8140 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8141 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8142 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8143 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8144
8145 if ( GCPtrFirst32 > pSel->u32Limit
8146 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8147 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8148
8149 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8150 {
8151 /** @todo CPL check. */
8152 }
8153
8154 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8155 }
8156 }
8157 else
8158 return iemRaiseGeneralProtectionFault0(pVCpu);
8159 return VINF_SUCCESS;
8160 }
8161
8162 case IEMMODE_64BIT:
8163 {
8164 RTGCPTR GCPtrMem = *pGCPtrMem;
8165 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8166 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8167
8168 Assert(cbMem >= 1);
8169 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8170 return VINF_SUCCESS;
8171 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8172 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8173 return iemRaiseGeneralProtectionFault0(pVCpu);
8174 }
8175
8176 default:
8177 AssertFailedReturn(VERR_IEM_IPE_7);
8178 }
8179}
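
/*
 * Example (editorial illustration only, values invented): the 16/32-bit limit
 * checks above for a 4 byte access at offset 0xfffd with u32Limit = 0xffff.
 *
 *      RTGCPTR32 GCPtrFirst32 = UINT32_C(0x0000fffd);
 *      RTGCPTR32 GCPtrLast32  = GCPtrFirst32 + 4 - 1;   // = 0x10000
 *      // Expand-up data segment:  faults, GCPtrLast32 > u32Limit.
 *      // Expand-down, B=1:        faults, GCPtrFirst32 < u32Limit + 1;
 *      //                          the valid window is (u32Limit, 0xffffffff].
 */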
8180
8181
8182/**
8183 * Translates a virtual address to a physical address and checks if we
8184 * can access the page as specified.
8185 *
8186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8187 * @param GCPtrMem The virtual address.
8188 * @param fAccess The intended access.
8189 * @param pGCPhysMem Where to return the physical address.
8190 */
8191IEM_STATIC VBOXSTRICTRC
8192iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8193{
8194 /** @todo Need a different PGM interface here. We're currently using
8195     *        generic / REM interfaces. This won't cut it for R0 & RC. */
8196 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8197 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8198 RTGCPHYS GCPhys;
8199 uint64_t fFlags;
8200 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8201 if (RT_FAILURE(rc))
8202 {
8203 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8204 /** @todo Check unassigned memory in unpaged mode. */
8205 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8206 *pGCPhysMem = NIL_RTGCPHYS;
8207 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8208 }
8209
8210    /* If the page is writable, user accessible and does not have the no-exec
8211       bit set, all access is allowed.  Otherwise we'll have to check more carefully... */
8212 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8213 {
8214 /* Write to read only memory? */
8215 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8216 && !(fFlags & X86_PTE_RW)
8217 && ( (pVCpu->iem.s.uCpl == 3
8218 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8219 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8220 {
8221 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8222 *pGCPhysMem = NIL_RTGCPHYS;
8223 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8224 }
8225
8226 /* Kernel memory accessed by userland? */
8227 if ( !(fFlags & X86_PTE_US)
8228 && pVCpu->iem.s.uCpl == 3
8229 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8230 {
8231 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8232 *pGCPhysMem = NIL_RTGCPHYS;
8233 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8234 }
8235
8236 /* Executing non-executable memory? */
8237 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8238 && (fFlags & X86_PTE_PAE_NX)
8239 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8240 {
8241 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8242 *pGCPhysMem = NIL_RTGCPHYS;
8243 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8244 VERR_ACCESS_DENIED);
8245 }
8246 }
8247
8248 /*
8249 * Set the dirty / access flags.
8250     * ASSUMES this is set when the address is translated rather than on commit.
8251 */
8252 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8253 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8254 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8255 {
8256 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8257 AssertRC(rc2);
8258 }
8259
8260 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8261 *pGCPhysMem = GCPhys;
8262 return VINF_SUCCESS;
8263}
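
/*
 * Example (editorial illustration only, flags invented): how the checks above
 * treat a present, user accessible but read-only page.
 *
 *      uint64_t const fFlags = X86_PTE_P | X86_PTE_US;  // no X86_PTE_RW, no NX
 *      // CPL 3 data write:  #PF (read-only page), regardless of CR0.WP.
 *      // CPL 0 data write:  #PF only when CR0.WP is set.
 *      // CPL 3 access to a page without X86_PTE_US:  #PF (kernel page).
 */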
8264
8265
8266
8267/**
8268 * Maps a physical page.
8269 *
8270 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8272 * @param GCPhysMem The physical address.
8273 * @param fAccess The intended access.
8274 * @param ppvMem Where to return the mapping address.
8275 * @param pLock The PGM lock.
8276 */
8277IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8278{
8279#ifdef IEM_LOG_MEMORY_WRITES
8280 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8281 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8282#endif
8283
8284 /** @todo This API may require some improving later. A private deal with PGM
8285     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
8286 * living in PGM, but with publicly accessible inlined access methods
8287 * could perhaps be an even better solution. */
8288 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8289 GCPhysMem,
8290 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8291 pVCpu->iem.s.fBypassHandlers,
8292 ppvMem,
8293 pLock);
8294 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8295 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8296
8297 return rc;
8298}
8299
8300
8301/**
8302 * Unmap a page previously mapped by iemMemPageMap.
8303 *
8304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8305 * @param GCPhysMem The physical address.
8306 * @param fAccess The intended access.
8307 * @param pvMem What iemMemPageMap returned.
8308 * @param pLock The PGM lock.
8309 */
8310DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8311{
8312 NOREF(pVCpu);
8313 NOREF(GCPhysMem);
8314 NOREF(fAccess);
8315 NOREF(pvMem);
8316 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8317}
8318
8319
8320/**
8321 * Looks up a memory mapping entry.
8322 *
8323 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8325 * @param pvMem The memory address.
8326 * @param fAccess The access to.
8327 */
8328DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8329{
8330 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8331 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8332 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8333 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8334 return 0;
8335 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8336 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8337 return 1;
8338 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8339 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8340 return 2;
8341 return VERR_NOT_FOUND;
8342}
8343
8344
8345/**
8346 * Finds a free memmap entry when using iNextMapping doesn't work.
8347 *
8348 * @returns Memory mapping index, 1024 on failure.
8349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8350 */
8351IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8352{
8353 /*
8354 * The easy case.
8355 */
8356 if (pVCpu->iem.s.cActiveMappings == 0)
8357 {
8358 pVCpu->iem.s.iNextMapping = 1;
8359 return 0;
8360 }
8361
8362 /* There should be enough mappings for all instructions. */
8363 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8364
8365 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8366 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8367 return i;
8368
8369 AssertFailedReturn(1024);
8370}
8371
8372
8373/**
8374 * Commits a bounce buffer that needs writing back and unmaps it.
8375 *
8376 * @returns Strict VBox status code.
8377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8378 * @param iMemMap The index of the buffer to commit.
8379 * @param   fPostponeFail   Whether we can postpone write failures to ring-3.
8380 * Always false in ring-3, obviously.
8381 */
8382IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8383{
8384 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8385 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8386#ifdef IN_RING3
8387 Assert(!fPostponeFail);
8388 RT_NOREF_PV(fPostponeFail);
8389#endif
8390
8391 /*
8392 * Do the writing.
8393 */
8394 PVM pVM = pVCpu->CTX_SUFF(pVM);
8395 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8396 {
8397 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8398 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8399 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8400 if (!pVCpu->iem.s.fBypassHandlers)
8401 {
8402 /*
8403 * Carefully and efficiently dealing with access handler return
8404              * codes makes this a little bloated.
8405 */
8406 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8408 pbBuf,
8409 cbFirst,
8410 PGMACCESSORIGIN_IEM);
8411 if (rcStrict == VINF_SUCCESS)
8412 {
8413 if (cbSecond)
8414 {
8415 rcStrict = PGMPhysWrite(pVM,
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8417 pbBuf + cbFirst,
8418 cbSecond,
8419 PGMACCESSORIGIN_IEM);
8420 if (rcStrict == VINF_SUCCESS)
8421 { /* nothing */ }
8422 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8423 {
8424 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8427 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429#ifndef IN_RING3
8430 else if (fPostponeFail)
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8435 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8436 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8437 return iemSetPassUpStatus(pVCpu, rcStrict);
8438 }
8439#endif
8440 else
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8445 return rcStrict;
8446 }
8447 }
8448 }
8449 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8450 {
8451 if (!cbSecond)
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8455 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8456 }
8457 else
8458 {
8459 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8461 pbBuf + cbFirst,
8462 cbSecond,
8463 PGMACCESSORIGIN_IEM);
8464 if (rcStrict2 == VINF_SUCCESS)
8465 {
8466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8469 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8470 }
8471 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8472 {
8473 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8476 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8477 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8478 }
8479#ifndef IN_RING3
8480 else if (fPostponeFail)
8481 {
8482 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8485 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8486 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8487 return iemSetPassUpStatus(pVCpu, rcStrict);
8488 }
8489#endif
8490 else
8491 {
8492 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8495 return rcStrict2;
8496 }
8497 }
8498 }
8499#ifndef IN_RING3
8500 else if (fPostponeFail)
8501 {
8502 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8505 if (!cbSecond)
8506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8507 else
8508 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8509 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8510 return iemSetPassUpStatus(pVCpu, rcStrict);
8511 }
8512#endif
8513 else
8514 {
8515 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8516 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8518 return rcStrict;
8519 }
8520 }
8521 else
8522 {
8523 /*
8524 * No access handlers, much simpler.
8525 */
8526 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8527 if (RT_SUCCESS(rc))
8528 {
8529 if (cbSecond)
8530 {
8531 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8532 if (RT_SUCCESS(rc))
8533 { /* likely */ }
8534 else
8535 {
8536 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8537 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8539 return rc;
8540 }
8541 }
8542 }
8543 else
8544 {
8545 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8548 return rc;
8549 }
8550 }
8551 }
8552
8553#if defined(IEM_LOG_MEMORY_WRITES)
8554 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8555 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8556 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8557 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8558 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8559 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8560
8561 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8562 g_cbIemWrote = cbWrote;
8563 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8564#endif
8565
8566 /*
8567 * Free the mapping entry.
8568 */
8569 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8570 Assert(pVCpu->iem.s.cActiveMappings != 0);
8571 pVCpu->iem.s.cActiveMappings--;
8572 return VINF_SUCCESS;
8573}
8574
8575
8576/**
8577 * iemMemMap worker that deals with a request crossing pages.
8578 */
8579IEM_STATIC VBOXSTRICTRC
8580iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8581{
8582 /*
8583 * Do the address translations.
8584 */
8585 RTGCPHYS GCPhysFirst;
8586 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8587 if (rcStrict != VINF_SUCCESS)
8588 return rcStrict;
8589
8590 RTGCPHYS GCPhysSecond;
8591 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8592 fAccess, &GCPhysSecond);
8593 if (rcStrict != VINF_SUCCESS)
8594 return rcStrict;
8595 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8596
8597 PVM pVM = pVCpu->CTX_SUFF(pVM);
8598
8599 /*
8600 * Read in the current memory content if it's a read, execute or partial
8601 * write access.
8602 */
8603 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8604 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8605 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8606
8607 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8608 {
8609 if (!pVCpu->iem.s.fBypassHandlers)
8610 {
8611 /*
8612 * Must carefully deal with access handler status codes here,
8613 * makes the code a bit bloated.
8614 */
8615 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8616 if (rcStrict == VINF_SUCCESS)
8617 {
8618 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8619 if (rcStrict == VINF_SUCCESS)
8620 { /*likely */ }
8621 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8622 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8623 else
8624 {
8625 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8626 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8627 return rcStrict;
8628 }
8629 }
8630 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8631 {
8632 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8633 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8634 {
8635 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8636 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8637 }
8638 else
8639 {
8640 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8641                     GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8642 return rcStrict2;
8643 }
8644 }
8645 else
8646 {
8647 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8648 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8649 return rcStrict;
8650 }
8651 }
8652 else
8653 {
8654 /*
8655             * No informational status codes here, much more straightforward.
8656 */
8657 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8658 if (RT_SUCCESS(rc))
8659 {
8660 Assert(rc == VINF_SUCCESS);
8661 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8662 if (RT_SUCCESS(rc))
8663 Assert(rc == VINF_SUCCESS);
8664 else
8665 {
8666 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8667 return rc;
8668 }
8669 }
8670 else
8671 {
8672 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8673 return rc;
8674 }
8675 }
8676 }
8677#ifdef VBOX_STRICT
8678 else
8679 memset(pbBuf, 0xcc, cbMem);
8680 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8681 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8682#endif
8683
8684 /*
8685 * Commit the bounce buffer entry.
8686 */
8687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8688 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8689 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8690 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8691 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8692 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8693 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8694 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8695 pVCpu->iem.s.cActiveMappings++;
8696
8697 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8698 *ppvMem = pbBuf;
8699 return VINF_SUCCESS;
8700}
8701
8702
8703/**
8704 * iemMemMap worker that deals with iemMemPageMap failures.
8705 */
8706IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8707 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8708{
8709 /*
8710 * Filter out conditions we can handle and the ones which shouldn't happen.
8711 */
8712 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8713 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8714 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8715 {
8716 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8717 return rcMap;
8718 }
8719 pVCpu->iem.s.cPotentialExits++;
8720
8721 /*
8722 * Read in the current memory content if it's a read, execute or partial
8723 * write access.
8724 */
8725 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8726 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8727 {
8728 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8729 memset(pbBuf, 0xff, cbMem);
8730 else
8731 {
8732 int rc;
8733 if (!pVCpu->iem.s.fBypassHandlers)
8734 {
8735 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8736 if (rcStrict == VINF_SUCCESS)
8737 { /* nothing */ }
8738 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8739 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8740 else
8741 {
8742 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8743 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8744 return rcStrict;
8745 }
8746 }
8747 else
8748 {
8749 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8750 if (RT_SUCCESS(rc))
8751 { /* likely */ }
8752 else
8753 {
8754 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8755 GCPhysFirst, rc));
8756 return rc;
8757 }
8758 }
8759 }
8760 }
8761#ifdef VBOX_STRICT
8762 else
8763 memset(pbBuf, 0xcc, cbMem);
8764#endif
8765#ifdef VBOX_STRICT
8766 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8767 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8768#endif
8769
8770 /*
8771 * Commit the bounce buffer entry.
8772 */
8773 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8774 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8775 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8776 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8777 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8778 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8779 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8780 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8781 pVCpu->iem.s.cActiveMappings++;
8782
8783 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8784 *ppvMem = pbBuf;
8785 return VINF_SUCCESS;
8786}
8787
8788
8789
8790/**
8791 * Maps the specified guest memory for the given kind of access.
8792 *
8793 * This may be using bounce buffering of the memory if it's crossing a page
8794 * boundary or if there is an access handler installed for any of it. Because
8795 * of lock prefix guarantees, we're in for some extra clutter when this
8796 * happens.
8797 *
8798 * This may raise a \#GP, \#SS, \#PF or \#AC.
8799 *
8800 * @returns VBox strict status code.
8801 *
8802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8803 * @param ppvMem Where to return the pointer to the mapped
8804 * memory.
8805 * @param cbMem The number of bytes to map. This is usually 1,
8806 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8807 * string operations it can be up to a page.
8808 * @param iSegReg The index of the segment register to use for
8809 * this access. The base and limits are checked.
8810 * Use UINT8_MAX to indicate that no segmentation
8811 * is required (for IDT, GDT and LDT accesses).
8812 * @param GCPtrMem The address of the guest memory.
8813 * @param fAccess How the memory is being accessed. The
8814 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8815 * how to map the memory, while the
8816 * IEM_ACCESS_WHAT_XXX bit is used when raising
8817 * exceptions.
8818 */
8819IEM_STATIC VBOXSTRICTRC
8820iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8821{
8822 /*
8823 * Check the input and figure out which mapping entry to use.
8824 */
8825 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8826 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8827 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8828
8829 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8830 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8831 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8832 {
8833 iMemMap = iemMemMapFindFree(pVCpu);
8834 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8835 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8836 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8837 pVCpu->iem.s.aMemMappings[2].fAccess),
8838 VERR_IEM_IPE_9);
8839 }
8840
8841 /*
8842 * Map the memory, checking that we can actually access it. If something
8843 * slightly complicated happens, fall back on bounce buffering.
8844 */
8845 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8846 if (rcStrict != VINF_SUCCESS)
8847 return rcStrict;
8848
8849 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8850 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8851
8852 RTGCPHYS GCPhysFirst;
8853 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8854 if (rcStrict != VINF_SUCCESS)
8855 return rcStrict;
8856
8857 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8858 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8859 if (fAccess & IEM_ACCESS_TYPE_READ)
8860 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8861
8862 void *pvMem;
8863 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8864 if (rcStrict != VINF_SUCCESS)
8865 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8866
8867 /*
8868 * Fill in the mapping table entry.
8869 */
8870 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8871 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8872 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8873 pVCpu->iem.s.cActiveMappings++;
8874
8875 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8876 *ppvMem = pvMem;
8877 return VINF_SUCCESS;
8878}
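
/*
 * Example (editorial sketch of a hypothetical caller, not part of IEM proper):
 * the usual pairing of iemMemMap and iemMemCommitAndUnmap for a
 * read-modify-write data access; the real callers are the instruction
 * implementations.  IEM_ACCESS_DATA_RW is assumed to be the RMW access type.
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_RW);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst |= UINT32_C(1);    // operate on the mapping (or bounce buffer)
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *      }
 *      return rcStrict;
 */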
8879
8880
8881/**
8882 * Commits the guest memory if bounce buffered and unmaps it.
8883 *
8884 * @returns Strict VBox status code.
8885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8886 * @param pvMem The mapping.
8887 * @param fAccess The kind of access.
8888 */
8889IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8890{
8891 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8892 AssertReturn(iMemMap >= 0, iMemMap);
8893
8894 /* If it's bounce buffered, we may need to write back the buffer. */
8895 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8896 {
8897 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8898 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8899 }
8900 /* Otherwise unlock it. */
8901 else
8902 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8903
8904 /* Free the entry. */
8905 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8906 Assert(pVCpu->iem.s.cActiveMappings != 0);
8907 pVCpu->iem.s.cActiveMappings--;
8908 return VINF_SUCCESS;
8909}
8910
8911#ifdef IEM_WITH_SETJMP
8912
8913/**
8914 * Maps the specified guest memory for the given kind of access, longjmp on
8915 * error.
8916 *
8917 * This may be using bounce buffering of the memory if it's crossing a page
8918 * boundary or if there is an access handler installed for any of it. Because
8919 * of lock prefix guarantees, we're in for some extra clutter when this
8920 * happens.
8921 *
8922 * This may raise a \#GP, \#SS, \#PF or \#AC.
8923 *
8924 * @returns Pointer to the mapped memory.
8925 *
8926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8927 * @param cbMem The number of bytes to map. This is usually 1,
8928 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8929 * string operations it can be up to a page.
8930 * @param iSegReg The index of the segment register to use for
8931 * this access. The base and limits are checked.
8932 * Use UINT8_MAX to indicate that no segmentation
8933 * is required (for IDT, GDT and LDT accesses).
8934 * @param GCPtrMem The address of the guest memory.
8935 * @param fAccess How the memory is being accessed. The
8936 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8937 * how to map the memory, while the
8938 * IEM_ACCESS_WHAT_XXX bit is used when raising
8939 * exceptions.
8940 */
8941IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8942{
8943 /*
8944 * Check the input and figure out which mapping entry to use.
8945 */
8946 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8947 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8948 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8949
8950 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8951 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8952 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8953 {
8954 iMemMap = iemMemMapFindFree(pVCpu);
8955 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8956 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8957 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8958 pVCpu->iem.s.aMemMappings[2].fAccess),
8959 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8960 }
8961
8962 /*
8963 * Map the memory, checking that we can actually access it. If something
8964 * slightly complicated happens, fall back on bounce buffering.
8965 */
8966 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8967 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8968 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8969
8970 /* Crossing a page boundary? */
8971 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8972 { /* No (likely). */ }
8973 else
8974 {
8975 void *pvMem;
8976 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8977 if (rcStrict == VINF_SUCCESS)
8978 return pvMem;
8979 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8980 }
8981
8982 RTGCPHYS GCPhysFirst;
8983 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8984 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8985 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8986
8987 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8988 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8989 if (fAccess & IEM_ACCESS_TYPE_READ)
8990 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8991
8992 void *pvMem;
8993 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8994 if (rcStrict == VINF_SUCCESS)
8995 { /* likely */ }
8996 else
8997 {
8998 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8999 if (rcStrict == VINF_SUCCESS)
9000 return pvMem;
9001 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9002 }
9003
9004 /*
9005 * Fill in the mapping table entry.
9006 */
9007 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9008 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9009 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9010 pVCpu->iem.s.cActiveMappings++;
9011
9012 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9013 return pvMem;
9014}
9015
9016
9017/**
9018 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9019 *
9020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9021 * @param pvMem The mapping.
9022 * @param fAccess The kind of access.
9023 */
9024IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9025{
9026 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9027 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9028
9029 /* If it's bounce buffered, we may need to write back the buffer. */
9030 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9031 {
9032 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9033 {
9034 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9035 if (rcStrict == VINF_SUCCESS)
9036 return;
9037 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9038 }
9039 }
9040 /* Otherwise unlock it. */
9041 else
9042 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9043
9044 /* Free the entry. */
9045 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9046 Assert(pVCpu->iem.s.cActiveMappings != 0);
9047 pVCpu->iem.s.cActiveMappings--;
9048}
9049
9050#endif /* IEM_WITH_SETJMP */
9051
9052#ifndef IN_RING3
9053/**
9054 * Commits the guest memory if bounce buffered and unmaps it; if committing any
9055 * bounce buffer part fails, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
9056 *
9057 * Allows the instruction to be completed and retired, while the IEM user will
9058 * return to ring-3 immediately afterwards and do the postponed writes there.
9059 *
9060 * @returns VBox status code (no strict statuses). Caller must check
9061 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9063 * @param pvMem The mapping.
9064 * @param fAccess The kind of access.
9065 */
9066IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9067{
9068 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9069 AssertReturn(iMemMap >= 0, iMemMap);
9070
9071 /* If it's bounce buffered, we may need to write back the buffer. */
9072 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9073 {
9074 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9075 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9076 }
9077 /* Otherwise unlock it. */
9078 else
9079 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9080
9081 /* Free the entry. */
9082 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9083 Assert(pVCpu->iem.s.cActiveMappings != 0);
9084 pVCpu->iem.s.cActiveMappings--;
9085 return VINF_SUCCESS;
9086}
9087#endif
9088
9089
9090/**
9091 * Rolls back mappings, releasing page locks and such.
9092 *
9093 * The caller shall only call this after checking cActiveMappings.
9094 *
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 */
9098IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9099{
9100 Assert(pVCpu->iem.s.cActiveMappings > 0);
9101
9102 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9103 while (iMemMap-- > 0)
9104 {
9105 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9106 if (fAccess != IEM_ACCESS_INVALID)
9107 {
9108 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9109 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9110 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9111 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9112 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9113 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9114 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9115 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9116 pVCpu->iem.s.cActiveMappings--;
9117 }
9118 }
9119}
9120
9121
9122/**
9123 * Fetches a data byte.
9124 *
9125 * @returns Strict VBox status code.
9126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9127 * @param pu8Dst Where to return the byte.
9128 * @param iSegReg The index of the segment register to use for
9129 * this access. The base and limits are checked.
9130 * @param GCPtrMem The address of the guest memory.
9131 */
9132IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9133{
9134 /* The lazy approach for now... */
9135 uint8_t const *pu8Src;
9136 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9137 if (rc == VINF_SUCCESS)
9138 {
9139 *pu8Dst = *pu8Src;
9140 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9141 }
9142 return rc;
9143}
9144
9145
9146#ifdef IEM_WITH_SETJMP
9147/**
9148 * Fetches a data byte, longjmp on error.
9149 *
9150 * @returns The byte.
9151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9152 * @param iSegReg The index of the segment register to use for
9153 * this access. The base and limits are checked.
9154 * @param GCPtrMem The address of the guest memory.
9155 */
9156DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9157{
9158 /* The lazy approach for now... */
9159 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9160 uint8_t const bRet = *pu8Src;
9161 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9162 return bRet;
9163}
9164#endif /* IEM_WITH_SETJMP */
9165
9166
9167/**
9168 * Fetches a data word.
9169 *
9170 * @returns Strict VBox status code.
9171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9172 * @param pu16Dst Where to return the word.
9173 * @param iSegReg The index of the segment register to use for
9174 * this access. The base and limits are checked.
9175 * @param GCPtrMem The address of the guest memory.
9176 */
9177IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9178{
9179 /* The lazy approach for now... */
9180 uint16_t const *pu16Src;
9181 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9182 if (rc == VINF_SUCCESS)
9183 {
9184 *pu16Dst = *pu16Src;
9185 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9186 }
9187 return rc;
9188}
9189
9190
9191#ifdef IEM_WITH_SETJMP
9192/**
9193 * Fetches a data word, longjmp on error.
9194 *
9195 * @returns The word
9196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9197 * @param iSegReg The index of the segment register to use for
9198 * this access. The base and limits are checked.
9199 * @param GCPtrMem The address of the guest memory.
9200 */
9201DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9202{
9203 /* The lazy approach for now... */
9204 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9205 uint16_t const u16Ret = *pu16Src;
9206 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9207 return u16Ret;
9208}
9209#endif
9210
9211
9212/**
9213 * Fetches a data dword.
9214 *
9215 * @returns Strict VBox status code.
9216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9217 * @param pu32Dst Where to return the dword.
9218 * @param iSegReg The index of the segment register to use for
9219 * this access. The base and limits are checked.
9220 * @param GCPtrMem The address of the guest memory.
9221 */
9222IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9223{
9224 /* The lazy approach for now... */
9225 uint32_t const *pu32Src;
9226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9227 if (rc == VINF_SUCCESS)
9228 {
9229 *pu32Dst = *pu32Src;
9230 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9231 }
9232 return rc;
9233}
9234
9235
9236#ifdef IEM_WITH_SETJMP
9237
9238IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9239{
9240 Assert(cbMem >= 1);
9241 Assert(iSegReg < X86_SREG_COUNT);
9242
9243 /*
9244 * 64-bit mode is simpler.
9245 */
9246 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9247 {
9248 if (iSegReg >= X86_SREG_FS)
9249 {
9250 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9251 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9252 GCPtrMem += pSel->u64Base;
9253 }
9254
9255 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9256 return GCPtrMem;
9257 }
9258 /*
9259 * 16-bit and 32-bit segmentation.
9260 */
9261 else
9262 {
9263 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9264 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9265 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9266 == X86DESCATTR_P /* data, expand up */
9267 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9268 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9269 {
9270 /* expand up */
9271 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9272 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9273 && GCPtrLast32 > (uint32_t)GCPtrMem))
9274 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9275 }
9276 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9277 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9278 {
9279 /* expand down */
9280 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9281 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9282 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9283 && GCPtrLast32 > (uint32_t)GCPtrMem))
9284 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9285 }
9286 else
9287 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9288 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9289 }
9290 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9291}
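/*
 * Illustrative example (not part of the original file): for an expand-down data
 * segment with u32Limit = 0x0fff and D/B set (so the upper bound used above is
 * UINT32_MAX), a 4-byte read at GCPtrMem = 0x1000 passes the expand-down checks
 * (0x1000 > 0x0fff, GCPtrLast32 = 0x1004 <= 0xffffffff and > 0x1000) and returns
 * GCPtrMem plus the segment base, while the same read at GCPtrMem = 0x0ffe fails
 * the first check and ends up in iemRaiseSelectorBoundsJmp().
 */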
9292
9293
9294IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9295{
9296 Assert(cbMem >= 1);
9297 Assert(iSegReg < X86_SREG_COUNT);
9298
9299 /*
9300 * 64-bit mode is simpler.
9301 */
9302 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9303 {
9304 if (iSegReg >= X86_SREG_FS)
9305 {
9306 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9307 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9308 GCPtrMem += pSel->u64Base;
9309 }
9310
9311 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9312 return GCPtrMem;
9313 }
9314 /*
9315 * 16-bit and 32-bit segmentation.
9316 */
9317 else
9318 {
9319 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9320 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9321 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9322 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9323 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9324 {
9325 /* expand up */
9326 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9327 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9328 && GCPtrLast32 > (uint32_t)GCPtrMem))
9329 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9330 }
9331 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9332 {
9333 /* expand down */
9334 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9335 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9336 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9337 && GCPtrLast32 > (uint32_t)GCPtrMem))
9338 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9339 }
9340 else
9341 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9342 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9343 }
9344 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9345}
9346
9347
9348/**
9349 * Fetches a data dword, longjmp on error, fallback/safe version.
9350 *
9351 * @returns The dword
9352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9353 * @param iSegReg The index of the segment register to use for
9354 * this access. The base and limits are checked.
9355 * @param GCPtrMem The address of the guest memory.
9356 */
9357IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9358{
9359 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9360 uint32_t const u32Ret = *pu32Src;
9361 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9362 return u32Ret;
9363}
9364
9365
9366/**
9367 * Fetches a data dword, longjmp on error.
9368 *
9369 * @returns The dword
9370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9371 * @param iSegReg The index of the segment register to use for
9372 * this access. The base and limits are checked.
9373 * @param GCPtrMem The address of the guest memory.
9374 */
9375DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9376{
9377# ifdef IEM_WITH_DATA_TLB
9378 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9379 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9380 {
9381 /// @todo more later.
9382 }
9383
9384 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9385# else
9386 /* The lazy approach. */
9387 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9388 uint32_t const u32Ret = *pu32Src;
9389 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9390 return u32Ret;
9391# endif
9392}
9393#endif
9394
9395
9396#ifdef SOME_UNUSED_FUNCTION
9397/**
9398 * Fetches a data dword and sign extends it to a qword.
9399 *
9400 * @returns Strict VBox status code.
9401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9402 * @param pu64Dst Where to return the sign extended value.
9403 * @param iSegReg The index of the segment register to use for
9404 * this access. The base and limits are checked.
9405 * @param GCPtrMem The address of the guest memory.
9406 */
9407IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9408{
9409 /* The lazy approach for now... */
9410 int32_t const *pi32Src;
9411 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9412 if (rc == VINF_SUCCESS)
9413 {
9414 *pu64Dst = *pi32Src;
9415 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9416 }
9417#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9418 else
9419 *pu64Dst = 0;
9420#endif
9421 return rc;
9422}
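/*
 * Illustrative note (not part of the original file): the assignment
 * *pu64Dst = *pi32Src above sign-extends, so a dword 0x80000000 read from guest
 * memory is returned as UINT64_C(0xffffffff80000000), whereas 0x7fffffff comes
 * back unchanged as UINT64_C(0x000000007fffffff).
 */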
9423#endif
9424
9425
9426/**
9427 * Fetches a data qword.
9428 *
9429 * @returns Strict VBox status code.
9430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9431 * @param pu64Dst Where to return the qword.
9432 * @param iSegReg The index of the segment register to use for
9433 * this access. The base and limits are checked.
9434 * @param GCPtrMem The address of the guest memory.
9435 */
9436IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9437{
9438 /* The lazy approach for now... */
9439 uint64_t const *pu64Src;
9440 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9441 if (rc == VINF_SUCCESS)
9442 {
9443 *pu64Dst = *pu64Src;
9444 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9445 }
9446 return rc;
9447}
9448
9449
9450#ifdef IEM_WITH_SETJMP
9451/**
9452 * Fetches a data qword, longjmp on error.
9453 *
9454 * @returns The qword.
9455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9456 * @param iSegReg The index of the segment register to use for
9457 * this access. The base and limits are checked.
9458 * @param GCPtrMem The address of the guest memory.
9459 */
9460DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9461{
9462 /* The lazy approach for now... */
9463 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9464 uint64_t const u64Ret = *pu64Src;
9465 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9466 return u64Ret;
9467}
9468#endif
9469
9470
9471/**
9472 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9473 *
9474 * @returns Strict VBox status code.
9475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9476 * @param pu64Dst Where to return the qword.
9477 * @param iSegReg The index of the segment register to use for
9478 * this access. The base and limits are checked.
9479 * @param GCPtrMem The address of the guest memory.
9480 */
9481IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9482{
9483 /* The lazy approach for now... */
9484 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9485 if (RT_UNLIKELY(GCPtrMem & 15))
9486 return iemRaiseGeneralProtectionFault0(pVCpu);
9487
9488 uint64_t const *pu64Src;
9489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9490 if (rc == VINF_SUCCESS)
9491 {
9492 *pu64Dst = *pu64Src;
9493 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9494 }
9495 return rc;
9496}
9497
9498
9499#ifdef IEM_WITH_SETJMP
9500/**
9501 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9502 *
9503 * @returns The qword.
9504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9505 * @param iSegReg The index of the segment register to use for
9506 * this access. The base and limits are checked.
9507 * @param GCPtrMem The address of the guest memory.
9508 */
9509DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9510{
9511 /* The lazy approach for now... */
9512 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9513 if (RT_LIKELY(!(GCPtrMem & 15)))
9514 {
9515 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9516 uint64_t const u64Ret = *pu64Src;
9517 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9518 return u64Ret;
9519 }
9520
9521 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9522 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9523}
9524#endif
9525
9526
9527/**
9528 * Fetches a data tword.
9529 *
9530 * @returns Strict VBox status code.
9531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9532 * @param pr80Dst Where to return the tword.
9533 * @param iSegReg The index of the segment register to use for
9534 * this access. The base and limits are checked.
9535 * @param GCPtrMem The address of the guest memory.
9536 */
9537IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9538{
9539 /* The lazy approach for now... */
9540 PCRTFLOAT80U pr80Src;
9541 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9542 if (rc == VINF_SUCCESS)
9543 {
9544 *pr80Dst = *pr80Src;
9545 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9546 }
9547 return rc;
9548}
9549
9550
9551#ifdef IEM_WITH_SETJMP
9552/**
9553 * Fetches a data tword, longjmp on error.
9554 *
9555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9556 * @param pr80Dst Where to return the tword.
9557 * @param iSegReg The index of the segment register to use for
9558 * this access. The base and limits are checked.
9559 * @param GCPtrMem The address of the guest memory.
9560 */
9561DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9562{
9563 /* The lazy approach for now... */
9564 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9565 *pr80Dst = *pr80Src;
9566 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9567}
9568#endif
9569
9570
9571/**
9572 * Fetches a data dqword (double qword), generally SSE related.
9573 *
9574 * @returns Strict VBox status code.
9575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9576 * @param pu128Dst Where to return the dqword.
9577 * @param iSegReg The index of the segment register to use for
9578 * this access. The base and limits are checked.
9579 * @param GCPtrMem The address of the guest memory.
9580 */
9581IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9582{
9583 /* The lazy approach for now... */
9584 PCRTUINT128U pu128Src;
9585 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9586 if (rc == VINF_SUCCESS)
9587 {
9588 pu128Dst->au64[0] = pu128Src->au64[0];
9589 pu128Dst->au64[1] = pu128Src->au64[1];
9590 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9591 }
9592 return rc;
9593}
9594
9595
9596#ifdef IEM_WITH_SETJMP
9597/**
9598 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9599 *
9600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9601 * @param pu128Dst Where to return the dqword.
9602 * @param iSegReg The index of the segment register to use for
9603 * this access. The base and limits are checked.
9604 * @param GCPtrMem The address of the guest memory.
9605 */
9606IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9607{
9608 /* The lazy approach for now... */
9609 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9610 pu128Dst->au64[0] = pu128Src->au64[0];
9611 pu128Dst->au64[1] = pu128Src->au64[1];
9612 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9613}
9614#endif
9615
9616
9617/**
9618 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9619 * related.
9620 *
9621 * Raises \#GP(0) if not aligned.
9622 *
9623 * @returns Strict VBox status code.
9624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9625 * @param pu128Dst Where to return the dqword.
9626 * @param iSegReg The index of the segment register to use for
9627 * this access. The base and limits are checked.
9628 * @param GCPtrMem The address of the guest memory.
9629 */
9630IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9631{
9632 /* The lazy approach for now... */
9633 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9634 if ( (GCPtrMem & 15)
9635 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9636 return iemRaiseGeneralProtectionFault0(pVCpu);
9637
9638 PCRTUINT128U pu128Src;
9639 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9640 if (rc == VINF_SUCCESS)
9641 {
9642 pu128Dst->au64[0] = pu128Src->au64[0];
9643 pu128Dst->au64[1] = pu128Src->au64[1];
9644 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9645 }
9646 return rc;
9647}
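/*
 * Illustrative note (not part of the original file): with MXCSR.MM clear, a
 * 16-byte fetch at e.g. GCPtrMem = 0x1008 takes the
 * iemRaiseGeneralProtectionFault0() path above because (0x1008 & 15) != 0,
 * while the same fetch at GCPtrMem = 0x1010 is mapped and both qwords are
 * copied out.
 */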
9648
9649
9650#ifdef IEM_WITH_SETJMP
9651/**
9652 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9653 * related, longjmp on error.
9654 *
9655 * Raises \#GP(0) if not aligned.
9656 *
9657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9658 * @param pu128Dst Where to return the dqword.
9659 * @param iSegReg The index of the segment register to use for
9660 * this access. The base and limits are checked.
9661 * @param GCPtrMem The address of the guest memory.
9662 */
9663DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9664{
9665 /* The lazy approach for now... */
9666 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9667 if ( (GCPtrMem & 15) == 0
9668 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9669 {
9670 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9671 pu128Dst->au64[0] = pu128Src->au64[0];
9672 pu128Dst->au64[1] = pu128Src->au64[1];
9673 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9674 return;
9675 }
9676
9677 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9678 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9679}
9680#endif
9681
9682
9683/**
9684 * Fetches a data oword (octo word), generally AVX related.
9685 *
9686 * @returns Strict VBox status code.
9687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9688 * @param pu256Dst Where to return the oword.
9689 * @param iSegReg The index of the segment register to use for
9690 * this access. The base and limits are checked.
9691 * @param GCPtrMem The address of the guest memory.
9692 */
9693IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9694{
9695 /* The lazy approach for now... */
9696 PCRTUINT256U pu256Src;
9697 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9698 if (rc == VINF_SUCCESS)
9699 {
9700 pu256Dst->au64[0] = pu256Src->au64[0];
9701 pu256Dst->au64[1] = pu256Src->au64[1];
9702 pu256Dst->au64[2] = pu256Src->au64[2];
9703 pu256Dst->au64[3] = pu256Src->au64[3];
9704 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9705 }
9706 return rc;
9707}
9708
9709
9710#ifdef IEM_WITH_SETJMP
9711/**
9712 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9713 *
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param pu256Dst Where to return the oword.
9716 * @param iSegReg The index of the segment register to use for
9717 * this access. The base and limits are checked.
9718 * @param GCPtrMem The address of the guest memory.
9719 */
9720IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9721{
9722 /* The lazy approach for now... */
9723 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9724 pu256Dst->au64[0] = pu256Src->au64[0];
9725 pu256Dst->au64[1] = pu256Src->au64[1];
9726 pu256Dst->au64[2] = pu256Src->au64[2];
9727 pu256Dst->au64[3] = pu256Src->au64[3];
9728 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9729}
9730#endif
9731
9732
9733/**
9734 * Fetches a data oword (octo word) at an aligned address, generally AVX
9735 * related.
9736 *
9737 * Raises \#GP(0) if not aligned.
9738 *
9739 * @returns Strict VBox status code.
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu256Dst Where to return the oword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9750 if (GCPtrMem & 31)
9751 return iemRaiseGeneralProtectionFault0(pVCpu);
9752
9753 PCRTUINT256U pu256Src;
9754 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9755 if (rc == VINF_SUCCESS)
9756 {
9757 pu256Dst->au64[0] = pu256Src->au64[0];
9758 pu256Dst->au64[1] = pu256Src->au64[1];
9759 pu256Dst->au64[2] = pu256Src->au64[2];
9760 pu256Dst->au64[3] = pu256Src->au64[3];
9761 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9762 }
9763 return rc;
9764}
9765
9766
9767#ifdef IEM_WITH_SETJMP
9768/**
9769 * Fetches a data oword (octo word) at an aligned address, generally AVX
9770 * related, longjmp on error.
9771 *
9772 * Raises \#GP(0) if not aligned.
9773 *
9774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9775 * @param pu256Dst Where to return the oword.
9776 * @param iSegReg The index of the segment register to use for
9777 * this access. The base and limits are checked.
9778 * @param GCPtrMem The address of the guest memory.
9779 */
9780DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9781{
9782 /* The lazy approach for now... */
9783 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9784 if ((GCPtrMem & 31) == 0)
9785 {
9786 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9787 pu256Dst->au64[0] = pu256Src->au64[0];
9788 pu256Dst->au64[1] = pu256Src->au64[1];
9789 pu256Dst->au64[2] = pu256Src->au64[2];
9790 pu256Dst->au64[3] = pu256Src->au64[3];
9791 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9792 return;
9793 }
9794
9795 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9796 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9797}
9798#endif
9799
9800
9801
9802/**
9803 * Fetches a descriptor register (lgdt, lidt).
9804 *
9805 * @returns Strict VBox status code.
9806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9807 * @param pcbLimit Where to return the limit.
9808 * @param pGCPtrBase Where to return the base.
9809 * @param iSegReg The index of the segment register to use for
9810 * this access. The base and limits are checked.
9811 * @param GCPtrMem The address of the guest memory.
9812 * @param enmOpSize The effective operand size.
9813 */
9814IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9815 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9816{
9817 /*
9818 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9819 * little special:
9820 * - The two reads are done separately.
9821 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9822 * - We suspect the 386 actually commits the limit before the base in
9823 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9824 * don't try to emulate this eccentric behavior, because it's not well
9825 * enough understood and rather hard to trigger.
9826 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9827 */
9828 VBOXSTRICTRC rcStrict;
9829 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9830 {
9831 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9832 if (rcStrict == VINF_SUCCESS)
9833 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9834 }
9835 else
9836 {
9837 uint32_t uTmp = 0; /* (otherwise Visual C++ may warn that it is used uninitialized) */
9838 if (enmOpSize == IEMMODE_32BIT)
9839 {
9840 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9841 {
9842 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9843 if (rcStrict == VINF_SUCCESS)
9844 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9845 }
9846 else
9847 {
9848 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9849 if (rcStrict == VINF_SUCCESS)
9850 {
9851 *pcbLimit = (uint16_t)uTmp;
9852 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9853 }
9854 }
9855 if (rcStrict == VINF_SUCCESS)
9856 *pGCPtrBase = uTmp;
9857 }
9858 else
9859 {
9860 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9861 if (rcStrict == VINF_SUCCESS)
9862 {
9863 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9864 if (rcStrict == VINF_SUCCESS)
9865 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9866 }
9867 }
9868 }
9869 return rcStrict;
9870}
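/*
 * Illustrative example (not part of the original file): the pseudo-descriptor
 * read above is a 16-bit limit at GCPtrMem followed by the base at GCPtrMem + 2.
 * With enmOpSize == IEMMODE_16BIT and the dword at GCPtrMem + 2 reading as
 * 0x12345678, the function returns *pGCPtrBase == 0x00345678 because only 24
 * base bits are used; with IEMMODE_32BIT (non-486 path) the full 0x12345678
 * would be returned.
 */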
9871
9872
9873
9874/**
9875 * Stores a data byte.
9876 *
9877 * @returns Strict VBox status code.
9878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9879 * @param iSegReg The index of the segment register to use for
9880 * this access. The base and limits are checked.
9881 * @param GCPtrMem The address of the guest memory.
9882 * @param u8Value The value to store.
9883 */
9884IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9885{
9886 /* The lazy approach for now... */
9887 uint8_t *pu8Dst;
9888 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9889 if (rc == VINF_SUCCESS)
9890 {
9891 *pu8Dst = u8Value;
9892 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9893 }
9894 return rc;
9895}
9896
9897
9898#ifdef IEM_WITH_SETJMP
9899/**
9900 * Stores a data byte, longjmp on error.
9901 *
9902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9903 * @param iSegReg The index of the segment register to use for
9904 * this access. The base and limits are checked.
9905 * @param GCPtrMem The address of the guest memory.
9906 * @param u8Value The value to store.
9907 */
9908IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9909{
9910 /* The lazy approach for now... */
9911 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9912 *pu8Dst = u8Value;
9913 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9914}
9915#endif
9916
9917
9918/**
9919 * Stores a data word.
9920 *
9921 * @returns Strict VBox status code.
9922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9923 * @param iSegReg The index of the segment register to use for
9924 * this access. The base and limits are checked.
9925 * @param GCPtrMem The address of the guest memory.
9926 * @param u16Value The value to store.
9927 */
9928IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9929{
9930 /* The lazy approach for now... */
9931 uint16_t *pu16Dst;
9932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9933 if (rc == VINF_SUCCESS)
9934 {
9935 *pu16Dst = u16Value;
9936 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9937 }
9938 return rc;
9939}
9940
9941
9942#ifdef IEM_WITH_SETJMP
9943/**
9944 * Stores a data word, longjmp on error.
9945 *
9946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9947 * @param iSegReg The index of the segment register to use for
9948 * this access. The base and limits are checked.
9949 * @param GCPtrMem The address of the guest memory.
9950 * @param u16Value The value to store.
9951 */
9952IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9953{
9954 /* The lazy approach for now... */
9955 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9956 *pu16Dst = u16Value;
9957 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9958}
9959#endif
9960
9961
9962/**
9963 * Stores a data dword.
9964 *
9965 * @returns Strict VBox status code.
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param iSegReg The index of the segment register to use for
9968 * this access. The base and limits are checked.
9969 * @param GCPtrMem The address of the guest memory.
9970 * @param u32Value The value to store.
9971 */
9972IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9973{
9974 /* The lazy approach for now... */
9975 uint32_t *pu32Dst;
9976 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9977 if (rc == VINF_SUCCESS)
9978 {
9979 *pu32Dst = u32Value;
9980 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9981 }
9982 return rc;
9983}
9984
9985
9986#ifdef IEM_WITH_SETJMP
9987/**
9988 * Stores a data dword, longjmp on error.
9989 *
9991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9992 * @param iSegReg The index of the segment register to use for
9993 * this access. The base and limits are checked.
9994 * @param GCPtrMem The address of the guest memory.
9995 * @param u32Value The value to store.
9996 */
9997IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9998{
9999 /* The lazy approach for now... */
10000 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10001 *pu32Dst = u32Value;
10002 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10003}
10004#endif
10005
10006
10007/**
10008 * Stores a data qword.
10009 *
10010 * @returns Strict VBox status code.
10011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10012 * @param iSegReg The index of the segment register to use for
10013 * this access. The base and limits are checked.
10014 * @param GCPtrMem The address of the guest memory.
10015 * @param u64Value The value to store.
10016 */
10017IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10018{
10019 /* The lazy approach for now... */
10020 uint64_t *pu64Dst;
10021 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10022 if (rc == VINF_SUCCESS)
10023 {
10024 *pu64Dst = u64Value;
10025 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10026 }
10027 return rc;
10028}
10029
10030
10031#ifdef IEM_WITH_SETJMP
10032/**
10033 * Stores a data qword, longjmp on error.
10034 *
10035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10036 * @param iSegReg The index of the segment register to use for
10037 * this access. The base and limits are checked.
10038 * @param GCPtrMem The address of the guest memory.
10039 * @param u64Value The value to store.
10040 */
10041IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10042{
10043 /* The lazy approach for now... */
10044 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10045 *pu64Dst = u64Value;
10046 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10047}
10048#endif
10049
10050
10051/**
10052 * Stores a data dqword.
10053 *
10054 * @returns Strict VBox status code.
10055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10056 * @param iSegReg The index of the segment register to use for
10057 * this access. The base and limits are checked.
10058 * @param GCPtrMem The address of the guest memory.
10059 * @param u128Value The value to store.
10060 */
10061IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10062{
10063 /* The lazy approach for now... */
10064 PRTUINT128U pu128Dst;
10065 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10066 if (rc == VINF_SUCCESS)
10067 {
10068 pu128Dst->au64[0] = u128Value.au64[0];
10069 pu128Dst->au64[1] = u128Value.au64[1];
10070 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10071 }
10072 return rc;
10073}
10074
10075
10076#ifdef IEM_WITH_SETJMP
10077/**
10078 * Stores a data dqword, longjmp on error.
10079 *
10080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10081 * @param iSegReg The index of the segment register to use for
10082 * this access. The base and limits are checked.
10083 * @param GCPtrMem The address of the guest memory.
10084 * @param u128Value The value to store.
10085 */
10086IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10087{
10088 /* The lazy approach for now... */
10089 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10090 pu128Dst->au64[0] = u128Value.au64[0];
10091 pu128Dst->au64[1] = u128Value.au64[1];
10092 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10093}
10094#endif
10095
10096
10097/**
10098 * Stores a data dqword, SSE aligned.
10099 *
10100 * @returns Strict VBox status code.
10101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10102 * @param iSegReg The index of the segment register to use for
10103 * this access. The base and limits are checked.
10104 * @param GCPtrMem The address of the guest memory.
10105 * @param u128Value The value to store.
10106 */
10107IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10108{
10109 /* The lazy approach for now... */
10110 if ( (GCPtrMem & 15)
10111 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10112 return iemRaiseGeneralProtectionFault0(pVCpu);
10113
10114 PRTUINT128U pu128Dst;
10115 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10116 if (rc == VINF_SUCCESS)
10117 {
10118 pu128Dst->au64[0] = u128Value.au64[0];
10119 pu128Dst->au64[1] = u128Value.au64[1];
10120 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10121 }
10122 return rc;
10123}
10124
10125
10126#ifdef IEM_WITH_SETJMP
10127/**
10128 * Stores a data dqword, SSE aligned, longjmp on error.
10129 *
10131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10132 * @param iSegReg The index of the segment register to use for
10133 * this access. The base and limits are checked.
10134 * @param GCPtrMem The address of the guest memory.
10135 * @param u128Value The value to store.
10136 */
10137DECL_NO_INLINE(IEM_STATIC, void)
10138iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10139{
10140 /* The lazy approach for now... */
10141 if ( (GCPtrMem & 15) == 0
10142 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10143 {
10144 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10145 pu128Dst->au64[0] = u128Value.au64[0];
10146 pu128Dst->au64[1] = u128Value.au64[1];
10147 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10148 return;
10149 }
10150
10151 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10152 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10153}
10154#endif
10155
10156
10157/**
10158 * Stores a data oword (octo word).
10159 *
10160 * @returns Strict VBox status code.
10161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10162 * @param iSegReg The index of the segment register to use for
10163 * this access. The base and limits are checked.
10164 * @param GCPtrMem The address of the guest memory.
10165 * @param pu256Value Pointer to the value to store.
10166 */
10167IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10168{
10169 /* The lazy approach for now... */
10170 PRTUINT256U pu256Dst;
10171 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10172 if (rc == VINF_SUCCESS)
10173 {
10174 pu256Dst->au64[0] = pu256Value->au64[0];
10175 pu256Dst->au64[1] = pu256Value->au64[1];
10176 pu256Dst->au64[2] = pu256Value->au64[2];
10177 pu256Dst->au64[3] = pu256Value->au64[3];
10178 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10179 }
10180 return rc;
10181}
10182
10183
10184#ifdef IEM_WITH_SETJMP
10185/**
10186 * Stores a data oword (octo word), longjmp on error.
10187 *
10188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10189 * @param iSegReg The index of the segment register to use for
10190 * this access. The base and limits are checked.
10191 * @param GCPtrMem The address of the guest memory.
10192 * @param pu256Value Pointer to the value to store.
10193 */
10194IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10195{
10196 /* The lazy approach for now... */
10197 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10198 pu256Dst->au64[0] = pu256Value->au64[0];
10199 pu256Dst->au64[1] = pu256Value->au64[1];
10200 pu256Dst->au64[2] = pu256Value->au64[2];
10201 pu256Dst->au64[3] = pu256Value->au64[3];
10202 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10203}
10204#endif
10205
10206
10207/**
10208 * Stores a data oword (octo word), AVX aligned.
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param iSegReg The index of the segment register to use for
10213 * this access. The base and limits are checked.
10214 * @param GCPtrMem The address of the guest memory.
10215 * @param pu256Value Pointer to the value to store.
10216 */
10217IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10218{
10219 /* The lazy approach for now... */
10220 if (GCPtrMem & 31)
10221 return iemRaiseGeneralProtectionFault0(pVCpu);
10222
10223 PRTUINT256U pu256Dst;
10224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10225 if (rc == VINF_SUCCESS)
10226 {
10227 pu256Dst->au64[0] = pu256Value->au64[0];
10228 pu256Dst->au64[1] = pu256Value->au64[1];
10229 pu256Dst->au64[2] = pu256Value->au64[2];
10230 pu256Dst->au64[3] = pu256Value->au64[3];
10231 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10232 }
10233 return rc;
10234}
10235
10236
10237#ifdef IEM_WITH_SETJMP
10238/**
10239 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10240 *
10242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10243 * @param iSegReg The index of the segment register to use for
10244 * this access. The base and limits are checked.
10245 * @param GCPtrMem The address of the guest memory.
10246 * @param pu256Value Pointer to the value to store.
10247 */
10248DECL_NO_INLINE(IEM_STATIC, void)
10249iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10250{
10251 /* The lazy approach for now... */
10252 if ((GCPtrMem & 31) == 0)
10253 {
10254 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10255 pu256Dst->au64[0] = pu256Value->au64[0];
10256 pu256Dst->au64[1] = pu256Value->au64[1];
10257 pu256Dst->au64[2] = pu256Value->au64[2];
10258 pu256Dst->au64[3] = pu256Value->au64[3];
10259 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10260 return;
10261 }
10262
10263 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10264 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10265}
10266#endif
10267
10268
10269/**
10270 * Stores a descriptor register (sgdt, sidt).
10271 *
10272 * @returns Strict VBox status code.
10273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10274 * @param cbLimit The limit.
10275 * @param GCPtrBase The base address.
10276 * @param iSegReg The index of the segment register to use for
10277 * this access. The base and limits are checked.
10278 * @param GCPtrMem The address of the guest memory.
10279 */
10280IEM_STATIC VBOXSTRICTRC
10281iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10282{
10283 /*
10284 * The SIDT and SGDT instructions actually store the data using two
10285 * independent writes. The instructions do not respond to operand-size prefixes.
10286 */
10287 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10288 if (rcStrict == VINF_SUCCESS)
10289 {
10290 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10291 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10292 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10293 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10294 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10295 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10296 else
10297 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10298 }
10299 return rcStrict;
10300}
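/*
 * Illustrative example (not part of the original file): in 16-bit mode on a
 * target CPU <= IEMTARGETCPU_286, a GCPtrBase of 0x00123456 is stored above as
 * the dword 0xff123456 (top byte forced to 0xff), while later targets store
 * 0x00123456 as-is; 32-bit mode stores the low dword and 64-bit mode the full
 * qword.
 */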
10301
10302
10303/**
10304 * Pushes a word onto the stack.
10305 *
10306 * @returns Strict VBox status code.
10307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10308 * @param u16Value The value to push.
10309 */
10310IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10311{
10312 /* Decrement the stack pointer. */
10313 uint64_t uNewRsp;
10314 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10315
10316 /* Write the word the lazy way. */
10317 uint16_t *pu16Dst;
10318 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10319 if (rc == VINF_SUCCESS)
10320 {
10321 *pu16Dst = u16Value;
10322 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10323 }
10324
10325 /* Commit the new RSP value unless an access handler made trouble. */
10326 if (rc == VINF_SUCCESS)
10327 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10328
10329 return rc;
10330}
10331
10332
10333/**
10334 * Pushes a dword onto the stack.
10335 *
10336 * @returns Strict VBox status code.
10337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10338 * @param u32Value The value to push.
10339 */
10340IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10341{
10342 /* Decrement the stack pointer. */
10343 uint64_t uNewRsp;
10344 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10345
10346 /* Write the dword the lazy way. */
10347 uint32_t *pu32Dst;
10348 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10349 if (rc == VINF_SUCCESS)
10350 {
10351 *pu32Dst = u32Value;
10352 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10353 }
10354
10355 /* Commit the new RSP value unless an access handler made trouble. */
10356 if (rc == VINF_SUCCESS)
10357 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10358
10359 return rc;
10360}
10361
10362
10363/**
10364 * Pushes a dword segment register value onto the stack.
10365 *
10366 * @returns Strict VBox status code.
10367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10368 * @param u32Value The value to push.
10369 */
10370IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10371{
10372 /* Decrement the stack pointer. */
10373 uint64_t uNewRsp;
10374 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10375
10376 /* The Intel docs talk about zero extending the selector register
10377 value. The actual Intel CPU tested here might be zero extending the value,
10378 but it still only writes the lower word... */
10379 /** @todo Test this on new HW, on AMD, and in 64-bit mode. Also test what
10380 * happens when crossing a page boundary: is the high word checked for
10381 * write accessibility or not? Probably it is. What about segment limits?
10382 * This behavior also appears to be shared with trap error codes.
10383 *
10384 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10385 * Check ancient hardware to see when it actually changed. */
10386 uint16_t *pu16Dst;
10387 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10388 if (rc == VINF_SUCCESS)
10389 {
10390 *pu16Dst = (uint16_t)u32Value;
10391 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10392 }
10393
10394 /* Commit the new RSP value unless an access handler made trouble. */
10395 if (rc == VINF_SUCCESS)
10396 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10397
10398 return rc;
10399}
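/*
 * Illustrative example (not part of the original file): with a 32-bit stack and
 * ESP = 0x1000, pushing a segment register via the function above leaves
 * ESP = 0x0ffc, stores the 16-bit selector at 0x0ffc..0x0ffd and leaves the
 * bytes at 0x0ffe..0x0fff untouched; the 4-byte slot is mapped read-write
 * (IEM_ACCESS_STACK_RW), presumably so the untouched high word survives bounce
 * buffering.
 */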
10400
10401
10402/**
10403 * Pushes a qword onto the stack.
10404 *
10405 * @returns Strict VBox status code.
10406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10407 * @param u64Value The value to push.
10408 */
10409IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10410{
10411 /* Decrement the stack pointer. */
10412 uint64_t uNewRsp;
10413 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10414
10415 /* Write the qword the lazy way. */
10416 uint64_t *pu64Dst;
10417 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10418 if (rc == VINF_SUCCESS)
10419 {
10420 *pu64Dst = u64Value;
10421 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10422 }
10423
10424 /* Commit the new RSP value unless an access handler made trouble. */
10425 if (rc == VINF_SUCCESS)
10426 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10427
10428 return rc;
10429}
10430
10431
10432/**
10433 * Pops a word from the stack.
10434 *
10435 * @returns Strict VBox status code.
10436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10437 * @param pu16Value Where to store the popped value.
10438 */
10439IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10440{
10441 /* Increment the stack pointer. */
10442 uint64_t uNewRsp;
10443 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10444
10445 /* Read the word the lazy way. */
10446 uint16_t const *pu16Src;
10447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10448 if (rc == VINF_SUCCESS)
10449 {
10450 *pu16Value = *pu16Src;
10451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10452
10453 /* Commit the new RSP value. */
10454 if (rc == VINF_SUCCESS)
10455 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10456 }
10457
10458 return rc;
10459}
10460
10461
10462/**
10463 * Pops a dword from the stack.
10464 *
10465 * @returns Strict VBox status code.
10466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10467 * @param pu32Value Where to store the popped value.
10468 */
10469IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10470{
10471 /* Increment the stack pointer. */
10472 uint64_t uNewRsp;
10473 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10474
10475 /* Read the dword the lazy way. */
10476 uint32_t const *pu32Src;
10477 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10478 if (rc == VINF_SUCCESS)
10479 {
10480 *pu32Value = *pu32Src;
10481 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10482
10483 /* Commit the new RSP value. */
10484 if (rc == VINF_SUCCESS)
10485 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10486 }
10487
10488 return rc;
10489}
10490
10491
10492/**
10493 * Pops a qword from the stack.
10494 *
10495 * @returns Strict VBox status code.
10496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10497 * @param pu64Value Where to store the popped value.
10498 */
10499IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10500{
10501 /* Increment the stack pointer. */
10502 uint64_t uNewRsp;
10503 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10504
10505 /* Read the qword the lazy way. */
10506 uint64_t const *pu64Src;
10507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10508 if (rc == VINF_SUCCESS)
10509 {
10510 *pu64Value = *pu64Src;
10511 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10512
10513 /* Commit the new RSP value. */
10514 if (rc == VINF_SUCCESS)
10515 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10516 }
10517
10518 return rc;
10519}
10520
10521
10522/**
10523 * Pushes a word onto the stack, using a temporary stack pointer.
10524 *
10525 * @returns Strict VBox status code.
10526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10527 * @param u16Value The value to push.
10528 * @param pTmpRsp Pointer to the temporary stack pointer.
10529 */
10530IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10531{
10532 /* Decrement the stack pointer. */
10533 RTUINT64U NewRsp = *pTmpRsp;
10534 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10535
10536 /* Write the word the lazy way. */
10537 uint16_t *pu16Dst;
10538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10539 if (rc == VINF_SUCCESS)
10540 {
10541 *pu16Dst = u16Value;
10542 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10543 }
10544
10545 /* Commit the new RSP value unless an access handler made trouble. */
10546 if (rc == VINF_SUCCESS)
10547 *pTmpRsp = NewRsp;
10548
10549 return rc;
10550}
10551
10552
10553/**
10554 * Pushes a dword onto the stack, using a temporary stack pointer.
10555 *
10556 * @returns Strict VBox status code.
10557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10558 * @param u32Value The value to push.
10559 * @param pTmpRsp Pointer to the temporary stack pointer.
10560 */
10561IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10562{
10563 /* Decrement the stack pointer. */
10564 RTUINT64U NewRsp = *pTmpRsp;
10565 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10566
10567 /* Write the dword the lazy way. */
10568 uint32_t *pu32Dst;
10569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10570 if (rc == VINF_SUCCESS)
10571 {
10572 *pu32Dst = u32Value;
10573 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10574 }
10575
10576 /* Commit the new RSP value unless an access handler made trouble. */
10577 if (rc == VINF_SUCCESS)
10578 *pTmpRsp = NewRsp;
10579
10580 return rc;
10581}
10582
10583
10584/**
10585 * Pushes a qword onto the stack, using a temporary stack pointer.
10586 *
10587 * @returns Strict VBox status code.
10588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10589 * @param u64Value The value to push.
10590 * @param pTmpRsp Pointer to the temporary stack pointer.
10591 */
10592IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10593{
10594 /* Decrement the stack pointer. */
10595 RTUINT64U NewRsp = *pTmpRsp;
10596 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10597
10598 /* Write the qword the lazy way. */
10599 uint64_t *pu64Dst;
10600 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10601 if (rc == VINF_SUCCESS)
10602 {
10603 *pu64Dst = u64Value;
10604 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10605 }
10606
10607 /* Commit the new RSP value unless an access handler made trouble. */
10608 if (rc == VINF_SUCCESS)
10609 *pTmpRsp = NewRsp;
10610
10611 return rc;
10612}
10613
10614
10615/**
10616 * Pops a word from the stack, using a temporary stack pointer.
10617 *
10618 * @returns Strict VBox status code.
10619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10620 * @param pu16Value Where to store the popped value.
10621 * @param pTmpRsp Pointer to the temporary stack pointer.
10622 */
10623IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10624{
10625 /* Increment the stack pointer. */
10626 RTUINT64U NewRsp = *pTmpRsp;
10627 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10628
10629 /* Read the word the lazy way. */
10630 uint16_t const *pu16Src;
10631 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10632 if (rc == VINF_SUCCESS)
10633 {
10634 *pu16Value = *pu16Src;
10635 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10636
10637 /* Commit the new RSP value. */
10638 if (rc == VINF_SUCCESS)
10639 *pTmpRsp = NewRsp;
10640 }
10641
10642 return rc;
10643}
10644
10645
10646/**
10647 * Pops a dword from the stack, using a temporary stack pointer.
10648 *
10649 * @returns Strict VBox status code.
10650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10651 * @param pu32Value Where to store the popped value.
10652 * @param pTmpRsp Pointer to the temporary stack pointer.
10653 */
10654IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10655{
10656 /* Increment the stack pointer. */
10657 RTUINT64U NewRsp = *pTmpRsp;
10658 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10659
10660 /* Read the dword the lazy way. */
10661 uint32_t const *pu32Src;
10662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10663 if (rc == VINF_SUCCESS)
10664 {
10665 *pu32Value = *pu32Src;
10666 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10667
10668 /* Commit the new RSP value. */
10669 if (rc == VINF_SUCCESS)
10670 *pTmpRsp = NewRsp;
10671 }
10672
10673 return rc;
10674}
10675
10676
10677/**
10678 * Pops a qword from the stack, using a temporary stack pointer.
10679 *
10680 * @returns Strict VBox status code.
10681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10682 * @param pu64Value Where to store the popped value.
10683 * @param pTmpRsp Pointer to the temporary stack pointer.
10684 */
10685IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10686{
10687 /* Increment the stack pointer. */
10688 RTUINT64U NewRsp = *pTmpRsp;
10689 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10690
10691 /* Read the qword the lazy way. */
10692 uint64_t const *pu64Src;
10693 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10694 if (rcStrict == VINF_SUCCESS)
10695 {
10696 *pu64Value = *pu64Src;
10697 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10698
10699 /* Commit the new RSP value. */
10700 if (rcStrict == VINF_SUCCESS)
10701 *pTmpRsp = NewRsp;
10702 }
10703
10704 return rcStrict;
10705}
10706
10707
10708/**
10709 * Begin a special stack push (used by interrupts, exceptions and such).
10710 *
10711 * This will raise \#SS or \#PF if appropriate.
10712 *
10713 * @returns Strict VBox status code.
10714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10715 * @param cbMem The number of bytes to push onto the stack.
10716 * @param ppvMem Where to return the pointer to the stack memory.
10717 * As with the other memory functions this could be
10718 * direct access or bounce buffered access, so
10719 * don't commit register changes until the commit call
10720 * succeeds.
10721 * @param puNewRsp Where to return the new RSP value. This must be
10722 * passed unchanged to
10723 * iemMemStackPushCommitSpecial().
10724 */
10725IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10726{
10727 Assert(cbMem < UINT8_MAX);
10728 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10729 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10730}
10731
10732
10733/**
10734 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10735 *
10736 * This will update the rSP.
10737 *
10738 * @returns Strict VBox status code.
10739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10740 * @param pvMem The pointer returned by
10741 * iemMemStackPushBeginSpecial().
10742 * @param uNewRsp The new RSP value returned by
10743 * iemMemStackPushBeginSpecial().
10744 */
10745IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10746{
10747 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10748 if (rcStrict == VINF_SUCCESS)
10749 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10750 return rcStrict;
10751}
10752
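/*
 * Illustrative sketch of how the begin/commit pair above is typically chained
 * by exception/interrupt delivery code; the 8-byte frame and the uEip/uErr
 * locals are made up for the example.
 *
 * @code
 *      uint32_t    *pau32Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #SS or #PF - nothing is committed yet.
 *      pau32Frame[0] = uEip;                   // Fill in the frame while it is mapped.
 *      pau32Frame[1] = uErr;
 *      return iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp); // Unmap, then update RSP.
 * @endcode
 */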
10753
10754/**
10755 * Begin a special stack pop (used by iret, retf and such).
10756 *
10757 * This will raise \#SS or \#PF if appropriate.
10758 *
10759 * @returns Strict VBox status code.
10760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10761 * @param cbMem The number of bytes to pop from the stack.
10762 * @param ppvMem Where to return the pointer to the stack memory.
10763 * @param puNewRsp Where to return the new RSP value. This must be
10764 * assigned to CPUMCTX::rsp manually some time
10765 * after iemMemStackPopDoneSpecial() has been
10766 * called.
10767 */
10768IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10769{
10770 Assert(cbMem < UINT8_MAX);
10771 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10772 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10773}
10774
10775
10776/**
10777 * Continue a special stack pop (used by iret and retf).
10778 *
10779 * This will raise \#SS or \#PF if appropriate.
10780 *
10781 * @returns Strict VBox status code.
10782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10783 * @param cbMem The number of bytes to pop from the stack.
10784 * @param ppvMem Where to return the pointer to the stack memory.
10785 * @param puNewRsp Where to return the new RSP value. This must be
10786 * assigned to CPUMCTX::rsp manually some time
10787 * after iemMemStackPopDoneSpecial() has been
10788 * called.
10789 */
10790IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10791{
10792 Assert(cbMem < UINT8_MAX);
10793 RTUINT64U NewRsp;
10794 NewRsp.u = *puNewRsp;
10795 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10796 *puNewRsp = NewRsp.u;
10797 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10798}
10799
10800
10801/**
10802 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10803 * iemMemStackPopContinueSpecial).
10804 *
10805 * The caller will manually commit the rSP.
10806 *
10807 * @returns Strict VBox status code.
10808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10809 * @param pvMem The pointer returned by
10810 * iemMemStackPopBeginSpecial() or
10811 * iemMemStackPopContinueSpecial().
10812 */
10813IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10814{
10815 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10816}
10817
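/*
 * Illustrative sketch of how the special pop helpers above are meant to be
 * chained by iret/retf style code; the 8-byte frame and the local names are
 * made up for the example.
 *
 * @code
 *      uint32_t const *pau32Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #SS or #PF from the stack access.
 *      uint32_t const uEip = pau32Frame[0];    // Copy out the values while the memory is mapped.
 *      uint32_t const uCs  = pau32Frame[1];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;       // Commit RSP only after everything else succeeded.
 * @endcode
 */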
10818
10819/**
10820 * Fetches a system table byte.
10821 *
10822 * @returns Strict VBox status code.
10823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10824 * @param pbDst Where to return the byte.
10825 * @param iSegReg The index of the segment register to use for
10826 * this access. The base and limits are checked.
10827 * @param GCPtrMem The address of the guest memory.
10828 */
10829IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10830{
10831 /* The lazy approach for now... */
10832 uint8_t const *pbSrc;
10833 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10834 if (rc == VINF_SUCCESS)
10835 {
10836 *pbDst = *pbSrc;
10837 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10838 }
10839 return rc;
10840}
10841
10842
10843/**
10844 * Fetches a system table word.
10845 *
10846 * @returns Strict VBox status code.
10847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10848 * @param pu16Dst Where to return the word.
10849 * @param iSegReg The index of the segment register to use for
10850 * this access. The base and limits are checked.
10851 * @param GCPtrMem The address of the guest memory.
10852 */
10853IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10854{
10855 /* The lazy approach for now... */
10856 uint16_t const *pu16Src;
10857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10858 if (rc == VINF_SUCCESS)
10859 {
10860 *pu16Dst = *pu16Src;
10861 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10862 }
10863 return rc;
10864}
10865
10866
10867/**
10868 * Fetches a system table dword.
10869 *
10870 * @returns Strict VBox status code.
10871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10872 * @param pu32Dst Where to return the dword.
10873 * @param iSegReg The index of the segment register to use for
10874 * this access. The base and limits are checked.
10875 * @param GCPtrMem The address of the guest memory.
10876 */
10877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10878{
10879 /* The lazy approach for now... */
10880 uint32_t const *pu32Src;
10881 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10882 if (rc == VINF_SUCCESS)
10883 {
10884 *pu32Dst = *pu32Src;
10885 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10886 }
10887 return rc;
10888}
10889
10890
10891/**
10892 * Fetches a system table qword.
10893 *
10894 * @returns Strict VBox status code.
10895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10896 * @param pu64Dst Where to return the qword.
10897 * @param iSegReg The index of the segment register to use for
10898 * this access. The base and limits are checked.
10899 * @param GCPtrMem The address of the guest memory.
10900 */
10901IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10902{
10903 /* The lazy approach for now... */
10904 uint64_t const *pu64Src;
10905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10906 if (rc == VINF_SUCCESS)
10907 {
10908 *pu64Dst = *pu64Src;
10909 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10910 }
10911 return rc;
10912}
10913
10914
10915/**
10916 * Fetches a descriptor table entry with caller specified error code.
10917 *
10918 * @returns Strict VBox status code.
10919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10920 * @param pDesc Where to return the descriptor table entry.
10921 * @param uSel The selector which table entry to fetch.
10922 * @param uXcpt The exception to raise on table lookup error.
10923 * @param uErrorCode The error code associated with the exception.
10924 */
10925IEM_STATIC VBOXSTRICTRC
10926iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10927{
10928 AssertPtr(pDesc);
10929 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10930
10931 /** @todo did the 286 require all 8 bytes to be accessible? */
10932 /*
10933 * Get the selector table base and check bounds.
10934 */
10935 RTGCPTR GCPtrBase;
10936 if (uSel & X86_SEL_LDT)
10937 {
10938 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10939 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10940 {
10941 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10942 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10943 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10944 uErrorCode, 0);
10945 }
10946
10947 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10948 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10949 }
10950 else
10951 {
10952 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10953 {
10954 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10955 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10956 uErrorCode, 0);
10957 }
10958 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10959 }
10960
10961 /*
10962 * Read the legacy descriptor and maybe the long mode extensions if
10963 * required.
10964 */
10965 VBOXSTRICTRC rcStrict;
10966 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10967 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10968 else
10969 {
10970 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10971 if (rcStrict == VINF_SUCCESS)
10972 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10973 if (rcStrict == VINF_SUCCESS)
10974 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10975 if (rcStrict == VINF_SUCCESS)
10976 pDesc->Legacy.au16[3] = 0;
10977 else
10978 return rcStrict;
10979 }
10980
10981 if (rcStrict == VINF_SUCCESS)
10982 {
10983 if ( !IEM_IS_LONG_MODE(pVCpu)
10984 || pDesc->Legacy.Gen.u1DescType)
10985 pDesc->Long.au64[1] = 0;
10986 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10987 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10988 else
10989 {
10990 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10991 /** @todo is this the right exception? */
10992 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10993 }
10994 }
10995 return rcStrict;
10996}
10997
10998
10999/**
11000 * Fetches a descriptor table entry.
11001 *
11002 * @returns Strict VBox status code.
11003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11004 * @param pDesc Where to return the descriptor table entry.
11005 * @param uSel The selector which table entry to fetch.
11006 * @param uXcpt The exception to raise on table lookup error.
11007 */
11008IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11009{
11010 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11011}
11012
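/*
 * Illustrative sketch of a typical call into the descriptor fetcher from far
 * control transfer code.  The present-bit check and the unconditional #GP(0)
 * on failure are simplifications made up for the example; real code selects
 * the proper exception and error code.
 *
 * @code
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #GP or #PF from the table lookup.
 *      if (!Desc.Legacy.Gen.u1Present)
 *          return iemRaiseGeneralProtectionFault0(pVCpu);  // Simplified; real code raises #NP here.
 * @endcode
 */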
11013
11014/**
11015 * Fakes a long mode stack selector descriptor for SS = 0.
11016 *
11017 * @param pDescSs Where to return the fake stack descriptor.
11018 * @param uDpl The DPL we want.
11019 */
11020IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11021{
11022 pDescSs->Long.au64[0] = 0;
11023 pDescSs->Long.au64[1] = 0;
11024 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11025 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11026 pDescSs->Long.Gen.u2Dpl = uDpl;
11027 pDescSs->Long.Gen.u1Present = 1;
11028 pDescSs->Long.Gen.u1Long = 1;
11029}
11030
11031
11032/**
11033 * Marks the selector descriptor as accessed (only non-system descriptors).
11034 *
11035 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11036 * will therefore skip the limit checks.
11037 *
11038 * @returns Strict VBox status code.
11039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11040 * @param uSel The selector.
11041 */
11042IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11043{
11044 /*
11045 * Get the selector table base and calculate the entry address.
11046 */
11047 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11048 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11049 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11050 GCPtr += uSel & X86_SEL_MASK;
11051
11052 /*
11053 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11054 * ugly stuff to avoid this. This will make sure it's an atomic access
11055     * as well as more or less remove any question about 8-bit or 32-bit accesses.
11056 */
11057 VBOXSTRICTRC rcStrict;
11058 uint32_t volatile *pu32;
11059 if ((GCPtr & 3) == 0)
11060 {
11061        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11062 GCPtr += 2 + 2;
11063 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11064 if (rcStrict != VINF_SUCCESS)
11065 return rcStrict;
11066        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11067 }
11068 else
11069 {
11070 /* The misaligned GDT/LDT case, map the whole thing. */
11071 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11072 if (rcStrict != VINF_SUCCESS)
11073 return rcStrict;
11074 switch ((uintptr_t)pu32 & 3)
11075 {
11076 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11077 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11078 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11079 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11080 }
11081 }
11082
11083 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11084}
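
/*
 * Worked example for the accessed-bit arithmetic above: the accessed bit is
 * bit 40 of the 8-byte descriptor.  In the aligned case the dword at offset 4
 * is mapped, so the bit becomes 40 - 32 = 8, which is what
 * ASMAtomicBitSet(pu32, 8) sets.  In the misaligned case the whole descriptor
 * is mapped and the switch either uses bit 40 directly (case 0) or advances
 * the byte pointer by 3, 2 or 1 bytes to the next dword-aligned host address,
 * subtracting 8 bits per byte skipped (40-24, 40-16, 40-8), so the very same
 * descriptor bit is targeted.
 */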
11085
11086/** @} */
11087
11088
11089/*
11090 * Include the C/C++ implementation of instruction.
11091 */
11092#include "IEMAllCImpl.cpp.h"
11093
11094
11095
11096/** @name "Microcode" macros.
11097 *
11098 * The idea is that we should be able to use the same code both to interpret
11099 * instructions and to recompile them.  Hence this obfuscation.
11100 *
11101 * @{
11102 */
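
/*
 * Illustrative sketch of an instruction implementation written in terms of
 * the IEM_MC_* microcode macros below.  The handler name and the hardcoded
 * 16-bit "add ax, imm16" shape are made up for the example, and FNIEMOP_DEF,
 * IEM_OPCODE_GET_NEXT_U16, IEM_MC_CALL_VOID_AIMPL_3 and iemAImpl_add_u16 are
 * assumed from elsewhere in IEM; real handlers live in the included
 * instruction templates and also deal with operand size and mnemonics.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_add_ax_Iw)
{
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);      /* decode the immediate */
    IEM_MC_BEGIN(3, 0);                                      /* 3 arguments, 0 locals */
    IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
    IEM_MC_ARG_CONST(uint16_t,  u16Src,  u16Imm,   1);
    IEM_MC_ARG(uint32_t *,      pEFlags,           2);
    IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);              /* dst = &AX */
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif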
11103#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11104#define IEM_MC_END() }
11105#define IEM_MC_PAUSE() do {} while (0)
11106#define IEM_MC_CONTINUE() do {} while (0)
11107
11108/** Internal macro. */
11109#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11110 do \
11111 { \
11112 VBOXSTRICTRC rcStrict2 = a_Expr; \
11113 if (rcStrict2 != VINF_SUCCESS) \
11114 return rcStrict2; \
11115 } while (0)
11116
11117
11118#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11119#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11120#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11121#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11122#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11123#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11124#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11125#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11126#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11127 do { \
11128 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11129 return iemRaiseDeviceNotAvailable(pVCpu); \
11130 } while (0)
11131#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11132 do { \
11133 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11134 return iemRaiseDeviceNotAvailable(pVCpu); \
11135 } while (0)
11136#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11137 do { \
11138 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11139 return iemRaiseMathFault(pVCpu); \
11140 } while (0)
11141#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11142 do { \
11143 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11144 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11145 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11146 return iemRaiseUndefinedOpcode(pVCpu); \
11147 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11148 return iemRaiseDeviceNotAvailable(pVCpu); \
11149 } while (0)
11150#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11151 do { \
11152 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11153 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11154 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11155 return iemRaiseUndefinedOpcode(pVCpu); \
11156 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11157 return iemRaiseDeviceNotAvailable(pVCpu); \
11158 } while (0)
11159#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11160 do { \
11161 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11162 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11163 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11164 return iemRaiseUndefinedOpcode(pVCpu); \
11165 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11166 return iemRaiseDeviceNotAvailable(pVCpu); \
11167 } while (0)
11168#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11169 do { \
11170 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11171 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11172 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11173 return iemRaiseUndefinedOpcode(pVCpu); \
11174 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11175 return iemRaiseDeviceNotAvailable(pVCpu); \
11176 } while (0)
11177#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11178 do { \
11179 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11180 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11181 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11182 return iemRaiseUndefinedOpcode(pVCpu); \
11183 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11184 return iemRaiseDeviceNotAvailable(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11187 do { \
11188 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11189 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11190 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11191 return iemRaiseUndefinedOpcode(pVCpu); \
11192 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11193 return iemRaiseDeviceNotAvailable(pVCpu); \
11194 } while (0)
11195#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11196 do { \
11197 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11198 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11199 return iemRaiseUndefinedOpcode(pVCpu); \
11200 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11201 return iemRaiseDeviceNotAvailable(pVCpu); \
11202 } while (0)
11203#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11204 do { \
11205 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11206 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11207 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11208 return iemRaiseUndefinedOpcode(pVCpu); \
11209 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11210 return iemRaiseDeviceNotAvailable(pVCpu); \
11211 } while (0)
11212#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11213 do { \
11214 if (pVCpu->iem.s.uCpl != 0) \
11215 return iemRaiseGeneralProtectionFault0(pVCpu); \
11216 } while (0)
11217#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11218 do { \
11219 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11220 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11221 } while (0)
11222#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11223 do { \
11224 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11225 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11226 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11227 return iemRaiseUndefinedOpcode(pVCpu); \
11228 } while (0)
11229#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11230 do { \
11231 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11232 return iemRaiseGeneralProtectionFault0(pVCpu); \
11233 } while (0)
11234
11235
11236#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11237#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11238#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11239#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11240#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11241#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11242#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11243 uint32_t a_Name; \
11244 uint32_t *a_pName = &a_Name
11245#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11246 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11247
11248#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11249#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11250
11251#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11252#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11253#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11254#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11255#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11256#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11257#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11258#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11268#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11269 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11270 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11271 } while (0)
11272#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11273 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11274 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11275 } while (0)
11276#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11277 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11278 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11279 } while (0)
11280/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11281#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11282 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11283 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11284 } while (0)
11285#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11286 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11287 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11288 } while (0)
11289/** @note Not for IOPL or IF testing or modification. */
11290#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11291#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11292#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11293#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11294
11295#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11296#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11297#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11298#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11299#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11300#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11301#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11302#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11303#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11304#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11305/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11306#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11307 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11308 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11309 } while (0)
11310#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11311 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11312 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11313 } while (0)
11314#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11315 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11316
11317
11318#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11319#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11320/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11321 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11322#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11323#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11324/** @note Not for IOPL or IF testing or modification. */
11325#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11326
11327#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11328#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11329#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11330 do { \
11331 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11332 *pu32Reg += (a_u32Value); \
11333        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11334 } while (0)
11335#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11336
11337#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11338#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11339#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11340 do { \
11341 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11342 *pu32Reg -= (a_u32Value); \
11343        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11344 } while (0)
11345#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11346#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11347
11348#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11349#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11350#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11351#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11352#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11353#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11354#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11355
11356#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11357#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11358#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11359#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11360
11361#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11362#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11363#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11364
11365#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11366#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11367#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11368
11369#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11370#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11371#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11372
11373#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11374#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11375#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11376
11377#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11378
11379#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11380
11381#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11382#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11383#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11384 do { \
11385 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11386 *pu32Reg &= (a_u32Value); \
11387        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11388 } while (0)
11389#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11390
11391#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11392#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11393#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11394 do { \
11395 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11396 *pu32Reg |= (a_u32Value); \
11397        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11398 } while (0)
11399#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11400
11401
11402/** @note Not for IOPL or IF modification. */
11403#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11404/** @note Not for IOPL or IF modification. */
11405#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11406/** @note Not for IOPL or IF modification. */
11407#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11408
11409#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11410
11411/** Switches the FPU state to MMX mode (FSW.TOS=0, all registers valid: abridged FTW=0xff) if necessary. */
11412#define IEM_MC_FPU_TO_MMX_MODE() do { \
11413 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11414 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11415 } while (0)
11416
11417/** Switches the FPU state from MMX mode (all registers empty: abridged FTW=0, full FTW=0xffff). */
11418#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11419 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11420 } while (0)
11421
11422#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11423 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11424#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11425 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11426#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11427 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11428 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11429 } while (0)
11430#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11431 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11432 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11433 } while (0)
11434#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11435 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11436#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11437 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11438#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11439 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11440
11441#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11442 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11443 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11444 } while (0)
11445#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11446 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11447#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11448 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11449#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11450 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11451#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11452 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11453 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11454 } while (0)
11455#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11456 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11457#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11458 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11459 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11460 } while (0)
11461#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11462 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11463#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11464 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11465 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11466 } while (0)
11467#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11468 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11469#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11470 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11471#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11472 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11473#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11474 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11475#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11476 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11477 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11478 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11479 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11480 } while (0)
11481
11482#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11483 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11484 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11485 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11486 } while (0)
11487#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11488 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11489 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11490 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11491 } while (0)
11492#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11493 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11494 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11495 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11496 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11497 } while (0)
11498#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11499 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11500 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11501 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11502 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11503 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11504 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11505 } while (0)
11506
11507#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11508#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11509 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11510 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11511 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11512 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11513 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11514 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11515 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11516 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11517 } while (0)
11518#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11519 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11520 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11521 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11523 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11525 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11526 } while (0)
11527#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11528 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11529 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11535 } while (0)
11536#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11543 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11544 } while (0)
11545
11546#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11547 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11548#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11549 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11550#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11551 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11552#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11553 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11554 uintptr_t const iYRegTmp = (a_iYReg); \
11555 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11557 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11558 } while (0)
11559
11560#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11561 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11562 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11563 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11568 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11569 } while (0)
11570#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11571 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11572 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11573 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11578 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11579 } while (0)
11580#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11581 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11588 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11589 } while (0)
11590
11591#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11592 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11593 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11595 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11596 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11597 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11598 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11602 } while (0)
11603#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11604 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11605 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11606 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11607 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11608 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11613 } while (0)
11614#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11615 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11616 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11617 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11618 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11623 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11624 } while (0)
11625#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11626 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11627 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11628 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11633 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11634 } while (0)
11635
11636#ifndef IEM_WITH_SETJMP
11637# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11638 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11641# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11643#else
11644# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11645 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11647 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11648# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11649 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11650#endif
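
/*
 * Note: the two flavours above are equivalent from the instruction
 * implementation's point of view.  Without IEM_WITH_SETJMP the macro
 * propagates any fetch failure as a VBOXSTRICTRC via
 * IEM_MC_RETURN_ON_FAILURE; with IEM_WITH_SETJMP the *Jmp fetchers longjmp
 * out on failure, so the plain assignment form needs no status check.  The
 * same pattern repeats for the wider fetch and store macros below.
 */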
11651
11652#ifndef IEM_WITH_SETJMP
11653# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11655# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11657# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11659#else
11660# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11661 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11663 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11664# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11665 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11666#endif
11667
11668#ifndef IEM_WITH_SETJMP
11669# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11671# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11673# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11675#else
11676# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11677 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11679 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11680# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682#endif
11683
11684#ifdef SOME_UNUSED_FUNCTION
11685# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11687#endif
11688
11689#ifndef IEM_WITH_SETJMP
11690# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11692# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11694# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11698#else
11699# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11700 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11702 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11703# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11706 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11707#endif
11708
11709#ifndef IEM_WITH_SETJMP
11710# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11716#else
11717# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11722 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11723#endif
11724
11725#ifndef IEM_WITH_SETJMP
11726# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11730#else
11731# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11732 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11733# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11734 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11735#endif
11736
11737#ifndef IEM_WITH_SETJMP
11738# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11740# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11742#else
11743# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11744 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11745# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11746 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11747#endif
11748
11749
11750
11751#ifndef IEM_WITH_SETJMP
11752# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11753 do { \
11754 uint8_t u8Tmp; \
11755 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11756 (a_u16Dst) = u8Tmp; \
11757 } while (0)
11758# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11759 do { \
11760 uint8_t u8Tmp; \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11762 (a_u32Dst) = u8Tmp; \
11763 } while (0)
11764# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11765 do { \
11766 uint8_t u8Tmp; \
11767 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11768 (a_u64Dst) = u8Tmp; \
11769 } while (0)
11770# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11771 do { \
11772 uint16_t u16Tmp; \
11773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11774 (a_u32Dst) = u16Tmp; \
11775 } while (0)
11776# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11777 do { \
11778 uint16_t u16Tmp; \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11780 (a_u64Dst) = u16Tmp; \
11781 } while (0)
11782# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11783 do { \
11784 uint32_t u32Tmp; \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11786 (a_u64Dst) = u32Tmp; \
11787 } while (0)
11788#else /* IEM_WITH_SETJMP */
11789# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11790 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11791# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11792 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11793# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11794 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11796 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11797# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11798 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11799# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11800 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11801#endif /* IEM_WITH_SETJMP */
11802
11803#ifndef IEM_WITH_SETJMP
11804# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11805 do { \
11806 uint8_t u8Tmp; \
11807 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11808 (a_u16Dst) = (int8_t)u8Tmp; \
11809 } while (0)
11810# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11811 do { \
11812 uint8_t u8Tmp; \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11814 (a_u32Dst) = (int8_t)u8Tmp; \
11815 } while (0)
11816# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11817 do { \
11818 uint8_t u8Tmp; \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11820 (a_u64Dst) = (int8_t)u8Tmp; \
11821 } while (0)
11822# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11823 do { \
11824 uint16_t u16Tmp; \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11826 (a_u32Dst) = (int16_t)u16Tmp; \
11827 } while (0)
11828# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11829 do { \
11830 uint16_t u16Tmp; \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11832 (a_u64Dst) = (int16_t)u16Tmp; \
11833 } while (0)
11834# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11835 do { \
11836 uint32_t u32Tmp; \
11837 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11838 (a_u64Dst) = (int32_t)u32Tmp; \
11839 } while (0)
11840#else /* IEM_WITH_SETJMP */
11841# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11848 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11849# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11851# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11852 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11853#endif /* IEM_WITH_SETJMP */
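
/*
 * Illustrative usage sketch, not lifted verbatim from the opcode decoders: the
 * variable bRm and the surrounding opcode function are assumed to exist.  A
 * MOVZX-style memory-to-register form combines the zero-/sign-extending fetch
 * macros above with the MC-block, local variable and general register macros
 * defined earlier in this file roughly like this:
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * A MOVSX-style form is identical except that it uses an _SX_ fetch macro.
 */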
11854
11855#ifndef IEM_WITH_SETJMP
11856# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11858# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11860# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11862# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11864#else
11865# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11866 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11867# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11868 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11869# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11870 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11871# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11872 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11873#endif
11874
11875#ifndef IEM_WITH_SETJMP
11876# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11878# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11880# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11882# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11884#else
11885# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11886 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11887# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11888 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11889# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11890 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11891# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11892 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11893#endif
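
/*
 * Illustrative usage sketch (assumed names, not a verbatim decoder): a
 * MOV r/m8, imm8 style memory form pairs the store macros above with an
 * immediate fetch, e.g.:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
 *      uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u8Imm);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * Note that the effective address is calculated with cbImm=1 so that RIP
 * relative addressing (64-bit mode) accounts for the trailing immediate byte.
 */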
11894
11895#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11896#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11897#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11898#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11899#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11900#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11901#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11902 do { \
11903 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11904 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11905 } while (0)
11906
11907#ifndef IEM_WITH_SETJMP
11908# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11910# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11912#else
11913# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11914 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11915# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11916 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11917#endif
11918
11919#ifndef IEM_WITH_SETJMP
11920# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11922# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11924#else
11925# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11926 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11927# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11928 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11929#endif
11930
11931
11932#define IEM_MC_PUSH_U16(a_u16Value) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11934#define IEM_MC_PUSH_U32(a_u32Value) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11936#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11938#define IEM_MC_PUSH_U64(a_u64Value) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11940
11941#define IEM_MC_POP_U16(a_pu16Value) \
11942 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11943#define IEM_MC_POP_U32(a_pu32Value) \
11944 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11945#define IEM_MC_POP_U64(a_pu64Value) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
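
/*
 * Illustrative usage sketch (iReg is assumed to come from the opcode byte): a
 * 16-bit operand size PUSH of a general register simply fetches the value and
 * hands it to the stack push macro, which bails out on any fault:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */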
11947
11948/** Maps guest memory for direct or bounce buffered access.
11949 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11950 * @remarks May return.
11951 */
11952#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11954
11955/** Maps guest memory for direct or bounce buffered access.
11956 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11957 * @remarks May return.
11958 */
11959#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11961
11962/** Commits the memory and unmaps the guest memory.
11963 * @remarks May return.
11964 */
11965#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11967
11968/** Commits the memory and unmaps the guest memory, unless the FPU status word
11969 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11970 * would cause FLD not to store.
11971 *
11972 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11973 * store, while \#P will not.
11974 *
11975 * @remarks May in theory return - for now.
11976 */
11977#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11978 do { \
11979 if ( !(a_u16FSW & X86_FSW_ES) \
11980 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11981 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11983 } while (0)
11984
11985/** Calculates the effective address from the ModR/M byte. */
11986#ifndef IEM_WITH_SETJMP
11987# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11988 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11989#else
11990# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11991 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11992#endif
11993
11994#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11995#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11996#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11997#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11998#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11999#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12000#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
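
/*
 * Illustrative sketch of the map/modify/commit pattern (bRm and the assembly
 * worker pfnNormalU32 are assumed to be supplied by the enclosing decoder):
 * a read-modify-write memory operand is mapped (directly or via a bounce
 * buffer), handed to the worker by reference, and then committed:
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,        pu32Dst,           0);
 *      IEM_MC_ARG(uint32_t,          u32Src,            1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(      pEFlags, EFlags,   2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
 *      IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnNormalU32, pu32Dst, u32Src, pEFlags);
 *
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */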
12001
12002/**
12003 * Defers the rest of the instruction emulation to a C implementation routine
12004 * and returns, only taking the standard parameters.
12005 *
12006 * @param a_pfnCImpl The pointer to the C routine.
12007 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12008 */
12009#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12010
12011/**
12012 * Defers the rest of instruction emulation to a C implementation routine and
12013 * returns, taking one argument in addition to the standard ones.
12014 *
12015 * @param a_pfnCImpl The pointer to the C routine.
12016 * @param a0 The argument.
12017 */
12018#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12019
12020/**
12021 * Defers the rest of the instruction emulation to a C implementation routine
12022 * and returns, taking two arguments in addition to the standard ones.
12023 *
12024 * @param a_pfnCImpl The pointer to the C routine.
12025 * @param a0 The first extra argument.
12026 * @param a1 The second extra argument.
12027 */
12028#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12029
12030/**
12031 * Defers the rest of the instruction emulation to a C implementation routine
12032 * and returns, taking three arguments in addition to the standard ones.
12033 *
12034 * @param a_pfnCImpl The pointer to the C routine.
12035 * @param a0 The first extra argument.
12036 * @param a1 The second extra argument.
12037 * @param a2 The third extra argument.
12038 */
12039#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12040
12041/**
12042 * Defers the rest of the instruction emulation to a C implementation routine
12043 * and returns, taking four arguments in addition to the standard ones.
12044 *
12045 * @param a_pfnCImpl The pointer to the C routine.
12046 * @param a0 The first extra argument.
12047 * @param a1 The second extra argument.
12048 * @param a2 The third extra argument.
12049 * @param a3 The fourth extra argument.
12050 */
12051#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12052
12053/**
12054 * Defers the rest of the instruction emulation to a C implementation routine
12055 * and returns, taking five arguments in addition to the standard ones.
12056 *
12057 * @param a_pfnCImpl The pointer to the C routine.
12058 * @param a0 The first extra argument.
12059 * @param a1 The second extra argument.
12060 * @param a2 The third extra argument.
12061 * @param a3 The fourth extra argument.
12062 * @param a4 The fifth extra argument.
12063 */
12064#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
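
/*
 * Illustrative usage sketch: since the IEM_MC_CALL_CIMPL_XXX macros expand to
 * a return statement, they must be the final statement of the MC block.  An
 * LTR-style instruction with a memory operand would look roughly like this
 * (bRm is assumed to be in scope; iemCImpl_ltr is assumed to be the C worker
 * implemented in the CImpl code):
 *
 *      IEM_MC_BEGIN(1, 1);
 *      IEM_MC_ARG(uint16_t, u16Sel, 0);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_FETCH_MEM_U16(u16Sel, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
 *      IEM_MC_END();
 */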
12065
12066/**
12067 * Defers the entire instruction emulation to a C implementation routine and
12068 * returns, only taking the standard parameters.
12069 *
12070 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12071 *
12072 * @param a_pfnCImpl The pointer to the C routine.
12073 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12074 */
12075#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12076
12077/**
12078 * Defers the entire instruction emulation to a C implementation routine and
12079 * returns, taking one argument in addition to the standard ones.
12080 *
12081 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12082 *
12083 * @param a_pfnCImpl The pointer to the C routine.
12084 * @param a0 The argument.
12085 */
12086#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12087
12088/**
12089 * Defers the entire instruction emulation to a C implementation routine and
12090 * returns, taking two arguments in addition to the standard ones.
12091 *
12092 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12093 *
12094 * @param a_pfnCImpl The pointer to the C routine.
12095 * @param a0 The first extra argument.
12096 * @param a1 The second extra argument.
12097 */
12098#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12099
12100/**
12101 * Defers the entire instruction emulation to a C implementation routine and
12102 * returns, taking three arguments in addition to the standard ones.
12103 *
12104 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12105 *
12106 * @param a_pfnCImpl The pointer to the C routine.
12107 * @param a0 The first extra argument.
12108 * @param a1 The second extra argument.
12109 * @param a2 The third extra argument.
12110 */
12111#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
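
/*
 * Illustrative usage sketch: instructions that need no MC block at all hand
 * everything to a C worker straight from the opcode function, e.g. (assuming
 * the iemCImpl_hlt worker defined in the CImpl code):
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */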
12112
12113/**
12114 * Calls a FPU assembly implementation taking one visible argument.
12115 *
12116 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12117 * @param a0 The first extra argument.
12118 */
12119#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12120 do { \
12121 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12122 } while (0)
12123
12124/**
12125 * Calls a FPU assembly implementation taking two visible arguments.
12126 *
12127 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12128 * @param a0 The first extra argument.
12129 * @param a1 The second extra argument.
12130 */
12131#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12132 do { \
12133 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12134 } while (0)
12135
12136/**
12137 * Calls a FPU assembly implementation taking three visible arguments.
12138 *
12139 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12140 * @param a0 The first extra argument.
12141 * @param a1 The second extra argument.
12142 * @param a2 The third extra argument.
12143 */
12144#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12145 do { \
12146 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12147 } while (0)
12148
12149#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12150 do { \
12151 (a_FpuData).FSW = (a_FSW); \
12152 (a_FpuData).r80Result = *(a_pr80Value); \
12153 } while (0)
12154
12155/** Pushes FPU result onto the stack. */
12156#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12157 iemFpuPushResult(pVCpu, &a_FpuData)
12158/** Pushes FPU result onto the stack and sets the FPUDP. */
12159#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12160 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12161
12162/** Replaces ST0 with the first result value and pushes the second one onto the FPU stack. */
12163#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12164 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12165
12166/** Stores FPU result in a stack register. */
12167#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12168 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12169/** Stores FPU result in a stack register and pops the stack. */
12170#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12171 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12172/** Stores FPU result in a stack register and sets the FPUDP. */
12173#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12174 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12175/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12176 * stack. */
12177#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12178 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12179
12180/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12181#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12182 iemFpuUpdateOpcodeAndIp(pVCpu)
12183/** Free a stack register (for FFREE and FFREEP). */
12184#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12185 iemFpuStackFree(pVCpu, a_iStReg)
12186/** Increment the FPU stack pointer. */
12187#define IEM_MC_FPU_STACK_INC_TOP() \
12188 iemFpuStackIncTop(pVCpu)
12189/** Decrement the FPU stack pointer. */
12190#define IEM_MC_FPU_STACK_DEC_TOP() \
12191 iemFpuStackDecTop(pVCpu)
12192
12193/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12194#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12195 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12196/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12197#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12198 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12199/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12200#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12201 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12202/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12203#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12204 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12205/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12206 * stack. */
12207#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12208 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12209/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12210#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12211 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12212
12213/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12214#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12215 iemFpuStackUnderflow(pVCpu, a_iStDst)
12216/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12217 * stack. */
12218#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12219 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12220/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12221 * FPUDS. */
12222#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12223 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12224/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12225 * FPUDS. Pops stack. */
12226#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12227 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12228/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12229 * stack twice. */
12230#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12231 iemFpuStackUnderflowThenPopPop(pVCpu)
12232/** Raises a FPU stack underflow exception for an instruction pushing a result
12233 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12234#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12235 iemFpuStackPushUnderflow(pVCpu)
12236/** Raises a FPU stack underflow exception for an instruction pushing a result
12237 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12238#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12239 iemFpuStackPushUnderflowTwo(pVCpu)
12240
12241/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12242 * FPUIP, FPUCS and FOP. */
12243#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12244 iemFpuStackPushOverflow(pVCpu)
12245/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12246 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12247#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12248 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12249/** Prepares for using the FPU state.
12250 * Ensures that we can use the host FPU in the current context (RC+R0).
12251 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12252#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12253/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12254#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12255/** Actualizes the guest FPU state so it can be accessed and modified. */
12256#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12257
12258/** Prepares for using the SSE state.
12259 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12260 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12261#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12262/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12263#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12264/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12265#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12266
12267/** Prepares for using the AVX state.
12268 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12269 * Ensures the guest AVX state in the CPUMCTX is up to date.
12270 * @note This will include the AVX512 state too when support for it is added
12271 *       due to the zero-extending feature of VEX instructions. */
12272#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12273/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12274#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12275/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12276#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12277
12278/**
12279 * Calls an MMX assembly implementation taking two visible arguments.
12280 *
12281 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12282 * @param a0 The first extra argument.
12283 * @param a1 The second extra argument.
12284 */
12285#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12286 do { \
12287 IEM_MC_PREPARE_FPU_USAGE(); \
12288 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12289 } while (0)
12290
12291/**
12292 * Calls an MMX assembly implementation taking three visible arguments.
12293 *
12294 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12295 * @param a0 The first extra argument.
12296 * @param a1 The second extra argument.
12297 * @param a2 The third extra argument.
12298 */
12299#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12300 do { \
12301 IEM_MC_PREPARE_FPU_USAGE(); \
12302 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12303 } while (0)
12304
12305
12306/**
12307 * Calls an SSE assembly implementation taking two visible arguments.
12308 *
12309 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12310 * @param a0 The first extra argument.
12311 * @param a1 The second extra argument.
12312 */
12313#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12314 do { \
12315 IEM_MC_PREPARE_SSE_USAGE(); \
12316 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12317 } while (0)
12318
12319/**
12320 * Calls an SSE assembly implementation taking three visible arguments.
12321 *
12322 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12323 * @param a0 The first extra argument.
12324 * @param a1 The second extra argument.
12325 * @param a2 The third extra argument.
12326 */
12327#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12328 do { \
12329 IEM_MC_PREPARE_SSE_USAGE(); \
12330 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12331 } while (0)
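
/*
 * Illustrative sketch of the register-to-register SSE pattern (the ModR/M
 * derived register indexes and the pfnU128 worker are assumed to come from the
 * enclosing decoder): the destination is taken by writable reference, the
 * source by const reference, and the worker gets the x87/XMM state pointer
 * implicitly via the call macro above:
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */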
12332
12333
12334/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12335 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12336#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12337 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12338
12339/**
12340 * Calls an AVX assembly implementation taking two visible arguments.
12341 *
12342 * There is one implicit zeroth argument, a pointer to the extended state.
12343 *
12344 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12345 * @param a1 The first extra argument.
12346 * @param a2 The second extra argument.
12347 */
12348#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12349 do { \
12350 IEM_MC_PREPARE_AVX_USAGE(); \
12351 a_pfnAImpl(pXState, (a1), (a2)); \
12352 } while (0)
12353
12354/**
12355 * Calls an AVX assembly implementation taking three visible arguments.
12356 *
12357 * There is one implicit zeroth argument, a pointer to the extended state.
12358 *
12359 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12360 * @param a1 The first extra argument.
12361 * @param a2 The second extra argument.
12362 * @param a3 The third extra argument.
12363 */
12364#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12365 do { \
12366 IEM_MC_PREPARE_AVX_USAGE(); \
12367 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12368 } while (0)
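
/*
 * Illustrative sketch of a 128-bit VEX three-operand register form (the
 * register indexes iXRegDst/iXRegSrc2, the pfnU128 worker and the YMM
 * high-lane handling are assumed to be supplied by the enclosing decoder):
 * argument 0 is the implicit extended state declared by
 * IEM_MC_IMPLICIT_AVX_AIMPL_ARGS and consumed by the call macro:
 *
 *      IEM_MC_BEGIN(4, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT128U,  puDst,  1);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc1, 2);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc2, 3);
 *      IEMOP_HLP_DONE_VEX_DECODING();
 *      IEM_MC_PREPARE_AVX_USAGE();
 *      IEM_MC_REF_XREG_U128(puDst,        iXRegDst);
 *      IEM_MC_REF_XREG_U128_CONST(puSrc1, pVCpu->iem.s.uVex3rdReg);
 *      IEM_MC_REF_XREG_U128_CONST(puSrc2, iXRegSrc2);
 *      IEM_MC_CALL_AVX_AIMPL_3(pfnU128, puDst, puSrc1, puSrc2);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */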
12369
12370/** @note Not for IOPL or IF testing. */
12371#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12374/** @note Not for IOPL or IF testing. */
12375#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12378/** @note Not for IOPL or IF testing. */
12379#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12380 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12381 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12382/** @note Not for IOPL or IF testing. */
12383#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12384 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12385 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12386/** @note Not for IOPL or IF testing. */
12387#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12388 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12389 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12390 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12391/** @note Not for IOPL or IF testing. */
12392#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12393 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12394 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12395 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12396#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12397#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12398#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12399/** @note Not for IOPL or IF testing. */
12400#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12401 if ( pVCpu->cpum.GstCtx.cx != 0 \
12402 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12403/** @note Not for IOPL or IF testing. */
12404#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12405 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12406 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12407/** @note Not for IOPL or IF testing. */
12408#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12409 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12410 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12413 if ( pVCpu->cpum.GstCtx.cx != 0 \
12414 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12417 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12418 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12421 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12422 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12423#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12424#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12425
12426#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12427 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12428#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12429 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12430#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12431 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12432#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12433 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12434#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12435 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12436#define IEM_MC_IF_FCW_IM() \
12437 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12438
12439#define IEM_MC_ELSE() } else {
12440#define IEM_MC_ENDIF() } do {} while (0)
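
/*
 * Illustrative sketch tying the FPU macros together (pfnAImpl and iStReg are
 * assumed to be parameters of the enclosing opcode helper): an
 * "ST0 <- ST0 op STn" style instruction checks the register pair, calls the
 * assembly worker and either stores the result or flags an underflow:
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */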
12441
12442/** @} */
12443
12444
12445/** @name Opcode Debug Helpers.
12446 * @{
12447 */
12448#ifdef VBOX_WITH_STATISTICS
12449# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12450#else
12451# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12452#endif
12453
12454#ifdef DEBUG
12455# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12456 do { \
12457 IEMOP_INC_STATS(a_Stats); \
12458 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12459 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12460 } while (0)
12461
12462# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12463 do { \
12464 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12465 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12466 (void)RT_CONCAT(OP_,a_Upper); \
12467 (void)(a_fDisHints); \
12468 (void)(a_fIemHints); \
12469 } while (0)
12470
12471# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12472 do { \
12473 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12474 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12475 (void)RT_CONCAT(OP_,a_Upper); \
12476 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12477 (void)(a_fDisHints); \
12478 (void)(a_fIemHints); \
12479 } while (0)
12480
12481# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12482 do { \
12483 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12484 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12485 (void)RT_CONCAT(OP_,a_Upper); \
12486 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12487 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12488 (void)(a_fDisHints); \
12489 (void)(a_fIemHints); \
12490 } while (0)
12491
12492# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12493 do { \
12494 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12495 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12496 (void)RT_CONCAT(OP_,a_Upper); \
12497 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12498 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12499 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12500 (void)(a_fDisHints); \
12501 (void)(a_fIemHints); \
12502 } while (0)
12503
12504# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12505 do { \
12506 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12507 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12508 (void)RT_CONCAT(OP_,a_Upper); \
12509 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12510 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12511 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12512 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12513 (void)(a_fDisHints); \
12514 (void)(a_fIemHints); \
12515 } while (0)
12516
12517#else
12518# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12519
12520# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12521 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12522# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12523 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12524# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12525 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12526# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12528# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12529 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12530
12531#endif
12532
12533#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12534 IEMOP_MNEMONIC0EX(a_Lower, \
12535 #a_Lower, \
12536 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12537#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12538 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12539 #a_Lower " " #a_Op1, \
12540 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12541#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12542 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12543 #a_Lower " " #a_Op1 "," #a_Op2, \
12544 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12545#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12546 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12547 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12548 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12549#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12550 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12551 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12552 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
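
/*
 * Illustrative usage sketch: an opcode function typically opens with one of
 * the mnemonic macros so that statistics, Log4 decode logging and the
 * disassembler cross-checks all key off the same spot (the hint flags, e.g.
 * DISOPTYPE_HARMLESS and IEMOPHINT_LOCK_ALLOWED, are assumed to be provided
 * by the disassembler headers and the instruction decoder includes):
 *
 *      FNIEMOP_DEF(iemOp_add_Eb_Gb)
 *      {
 *          IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_LOCK_ALLOWED);
 *          ... fetch ModR/M and emit the IEM_MC block or defer to a helper ...
 *      }
 */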
12553
12554/** @} */
12555
12556
12557/** @name Opcode Helpers.
12558 * @{
12559 */
12560
12561#ifdef IN_RING3
12562# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12563 do { \
12564 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12565 else \
12566 { \
12567 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12568 return IEMOP_RAISE_INVALID_OPCODE(); \
12569 } \
12570 } while (0)
12571#else
12572# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12573 do { \
12574 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12575 else return IEMOP_RAISE_INVALID_OPCODE(); \
12576 } while (0)
12577#endif
12578
12579/** The instruction requires a 186 or later. */
12580#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12581# define IEMOP_HLP_MIN_186() do { } while (0)
12582#else
12583# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12584#endif
12585
12586/** The instruction requires a 286 or later. */
12587#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12588# define IEMOP_HLP_MIN_286() do { } while (0)
12589#else
12590# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12591#endif
12592
12593/** The instruction requires a 386 or later. */
12594#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12595# define IEMOP_HLP_MIN_386() do { } while (0)
12596#else
12597# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12598#endif
12599
12600/** The instruction requires a 386 or later if the given expression is true. */
12601#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12602# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12603#else
12604# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12605#endif
12606
12607/** The instruction requires a 486 or later. */
12608#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12609# define IEMOP_HLP_MIN_486() do { } while (0)
12610#else
12611# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12612#endif
12613
12614/** The instruction requires a Pentium (586) or later. */
12615#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12616# define IEMOP_HLP_MIN_586() do { } while (0)
12617#else
12618# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12619#endif
12620
12621/** The instruction requires a PentiumPro (686) or later. */
12622#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12623# define IEMOP_HLP_MIN_686() do { } while (0)
12624#else
12625# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12626#endif
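
/*
 * Illustrative usage sketch: the minimum CPU helpers go right after the
 * mnemonic macro so that target CPUs older than the required model raise the
 * historically correct \#UD for instructions they did not have, e.g. for a
 * 386+ instruction:
 *
 *      FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
 *      {
 *          IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *          IEMOP_HLP_MIN_386();
 *          ... rest of the decoder ...
 *      }
 */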
12627
12628
12629/** The instruction raises an \#UD in real and V8086 mode. */
12630#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12631 do \
12632 { \
12633 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12634 else return IEMOP_RAISE_INVALID_OPCODE(); \
12635 } while (0)
12636
12637#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12638/** The instruction raises an \#UD in real and V8086 mode, or when executed in
12639 * long mode without a 64-bit code segment (applicable to all VMX instructions
12640 * except VMCALL).
12641 */
12642#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12643 do \
12644 { \
12645 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12646 && ( !IEM_IS_LONG_MODE(pVCpu) \
12647 || IEM_IS_64BIT_CODE(pVCpu))) \
12648 { /* likely */ } \
12649 else \
12650 { \
12651 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12652 { \
12653 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12654 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12655 return IEMOP_RAISE_INVALID_OPCODE(); \
12656 } \
12657 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12658 { \
12659 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12660 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12661 return IEMOP_RAISE_INVALID_OPCODE(); \
12662 } \
12663 } \
12664 } while (0)
12665
12666/** The instruction can only be executed in VMX operation (VMX root mode and
12667 * non-root mode).
12668 *
12669 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12670 */
12671# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12672 do \
12673 { \
12674 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12675 else \
12676 { \
12677 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12678 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12679 return IEMOP_RAISE_INVALID_OPCODE(); \
12680 } \
12681 } while (0)
12682#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12683
12684/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12685 * 64-bit mode. */
12686#define IEMOP_HLP_NO_64BIT() \
12687 do \
12688 { \
12689 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12690 return IEMOP_RAISE_INVALID_OPCODE(); \
12691 } while (0)
12692
12693/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12694 * 64-bit mode. */
12695#define IEMOP_HLP_ONLY_64BIT() \
12696 do \
12697 { \
12698 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12699 return IEMOP_RAISE_INVALID_OPCODE(); \
12700 } while (0)
12701
12702/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
12703#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12704 do \
12705 { \
12706 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12707 iemRecalEffOpSize64Default(pVCpu); \
12708 } while (0)
12709
12710/** The instruction has a 64-bit operand size if in 64-bit mode. */
12711#define IEMOP_HLP_64BIT_OP_SIZE() \
12712 do \
12713 { \
12714 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12715 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12716 } while (0)
12717
12718/** Only a REX prefix immediately preceding the first opcode byte takes
12719 * effect. This macro helps ensure this and logs bad guest code. */
12720#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12721 do \
12722 { \
12723 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12724 { \
12725 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12726 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12727 pVCpu->iem.s.uRexB = 0; \
12728 pVCpu->iem.s.uRexIndex = 0; \
12729 pVCpu->iem.s.uRexReg = 0; \
12730 iemRecalEffOpSize(pVCpu); \
12731 } \
12732 } while (0)
12733
12734/**
12735 * Done decoding.
12736 */
12737#define IEMOP_HLP_DONE_DECODING() \
12738 do \
12739 { \
12740 /*nothing for now, maybe later... */ \
12741 } while (0)
12742
12743/**
12744 * Done decoding, raise \#UD exception if lock prefix present.
12745 */
12746#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12747 do \
12748 { \
12749 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12750 { /* likely */ } \
12751 else \
12752 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12753 } while (0)
12754
12755
12756/**
12757 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12758 * repnz or size prefixes are present, or if in real or v8086 mode.
12759 */
12760#define IEMOP_HLP_DONE_VEX_DECODING() \
12761 do \
12762 { \
12763 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12764 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12765 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12766 { /* likely */ } \
12767 else \
12768 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12769 } while (0)
12770
12771/**
12772 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12773 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12774 */
12775#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12776 do \
12777 { \
12778 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12779 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12780 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12781 && pVCpu->iem.s.uVexLength == 0)) \
12782 { /* likely */ } \
12783 else \
12784 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12785 } while (0)
12786
12787
12788/**
12789 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12790 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12791 * register 0, or if in real or v8086 mode.
12792 */
12793#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12794 do \
12795 { \
12796 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12797 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12798 && !pVCpu->iem.s.uVex3rdReg \
12799 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12800 { /* likely */ } \
12801 else \
12802 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12803 } while (0)
12804
12805/**
12806 * Done decoding VEX, no V, L=0.
12807 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12808 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12809 */
12810#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12811 do \
12812 { \
12813 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12814 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12815 && pVCpu->iem.s.uVexLength == 0 \
12816 && pVCpu->iem.s.uVex3rdReg == 0 \
12817 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12818 { /* likely */ } \
12819 else \
12820 return IEMOP_RAISE_INVALID_OPCODE(); \
12821 } while (0)
12822
12823#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12824 do \
12825 { \
12826 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12827 { /* likely */ } \
12828 else \
12829 { \
12830 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12831 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12832 } \
12833 } while (0)
12834#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12835 do \
12836 { \
12837 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12838 { /* likely */ } \
12839 else \
12840 { \
12841 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12842 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12843 } \
12844 } while (0)
12845
12846/**
12847 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12848 * are present.
12849 */
12850#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12851 do \
12852 { \
12853 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12854 { /* likely */ } \
12855 else \
12856 return IEMOP_RAISE_INVALID_OPCODE(); \
12857 } while (0)
12858
12859/**
12860 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12861 * prefixes are present.
12862 */
12863#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12864 do \
12865 { \
12866 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12867 { /* likely */ } \
12868 else \
12869 return IEMOP_RAISE_INVALID_OPCODE(); \
12870 } while (0)
12871
12872
12873/**
12874 * Calculates the effective address of a ModR/M memory operand.
12875 *
12876 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12877 *
12878 * @return Strict VBox status code.
12879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12880 * @param bRm The ModRM byte.
12881 * @param cbImm The size of any immediate following the
12882 * effective address opcode bytes. Important for
12883 * RIP relative addressing.
12884 * @param pGCPtrEff Where to return the effective address.
12885 */
12886IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12887{
12888 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12889# define SET_SS_DEF() \
12890 do \
12891 { \
12892 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12893 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12894 } while (0)
12895
12896 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12897 {
12898/** @todo Check the effective address size crap! */
12899 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12900 {
12901 uint16_t u16EffAddr;
12902
12903 /* Handle the disp16 form with no registers first. */
12904 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12905 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12906 else
12907 {
12908                /* Get the displacement. */
12909 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12910 {
12911 case 0: u16EffAddr = 0; break;
12912 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12913 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12914 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12915 }
12916
12917 /* Add the base and index registers to the disp. */
12918 switch (bRm & X86_MODRM_RM_MASK)
12919 {
12920 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12921 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12922 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12923 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12924 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12925 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12926 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12927 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12928 }
12929 }
12930
12931 *pGCPtrEff = u16EffAddr;
12932 }
12933 else
12934 {
12935 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12936 uint32_t u32EffAddr;
12937
12938 /* Handle the disp32 form with no registers first. */
12939 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12940 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12941 else
12942 {
12943 /* Get the register (or SIB) value. */
12944 switch ((bRm & X86_MODRM_RM_MASK))
12945 {
12946 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12947 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12948 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12949 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12950 case 4: /* SIB */
12951 {
12952 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12953
12954 /* Get the index and scale it. */
12955 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12956 {
12957 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12958 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12959 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12960 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12961 case 4: u32EffAddr = 0; /*none */ break;
12962 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12963 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12964 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12965 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12966 }
12967 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12968
12969 /* add base */
12970 switch (bSib & X86_SIB_BASE_MASK)
12971 {
12972 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12973 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12974 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12975 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12976 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12977 case 5:
12978 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12979 {
12980 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12981 SET_SS_DEF();
12982 }
12983 else
12984 {
12985 uint32_t u32Disp;
12986 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12987 u32EffAddr += u32Disp;
12988 }
12989 break;
12990 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12991 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12993 }
12994 break;
12995 }
12996 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12997 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12998 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12999 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13000 }
13001
13002 /* Get and add the displacement. */
13003 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13004 {
13005 case 0:
13006 break;
13007 case 1:
13008 {
13009 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13010 u32EffAddr += i8Disp;
13011 break;
13012 }
13013 case 2:
13014 {
13015 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13016 u32EffAddr += u32Disp;
13017 break;
13018 }
13019 default:
13020 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13021 }
13022
13023 }
13024 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13025 *pGCPtrEff = u32EffAddr;
13026 else
13027 {
13028 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13029 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13030 }
13031 }
13032 }
13033 else
13034 {
13035 uint64_t u64EffAddr;
13036
13037 /* Handle the rip+disp32 form with no registers first. */
13038 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13039 {
13040 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13041 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13042 }
13043 else
13044 {
13045 /* Get the register (or SIB) value. */
13046 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13047 {
13048 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13049 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13050 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13051 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13052 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13053 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13054 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13055 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13056 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13057 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13058 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13059 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13060 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13061 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13062 /* SIB */
13063 case 4:
13064 case 12:
13065 {
13066 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13067
13068 /* Get the index and scale it. */
13069 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13070 {
13071 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13072 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13073 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13074 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13075 case 4: u64EffAddr = 0; /*none */ break;
13076 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13077 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13078 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13079 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13080 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13081 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13082 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13083 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13084 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13085 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13086 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13087 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13088 }
13089 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13090
13091 /* add base */
13092 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13093 {
13094 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13095 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13096 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13097 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13098 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13099 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13100 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13101 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13102 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13103 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13104 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13105 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13106 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13107 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13108 /* complicated encodings */
13109 case 5:
13110 case 13:
13111 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13112 {
13113 if (!pVCpu->iem.s.uRexB)
13114 {
13115 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13116 SET_SS_DEF();
13117 }
13118 else
13119 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13120 }
13121 else
13122 {
13123 uint32_t u32Disp;
13124 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13125 u64EffAddr += (int32_t)u32Disp;
13126 }
13127 break;
13128 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13129 }
13130 break;
13131 }
13132 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13133 }
13134
13135 /* Get and add the displacement. */
13136 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13137 {
13138 case 0:
13139 break;
13140 case 1:
13141 {
13142 int8_t i8Disp;
13143 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13144 u64EffAddr += i8Disp;
13145 break;
13146 }
13147 case 2:
13148 {
13149 uint32_t u32Disp;
13150 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13151 u64EffAddr += (int32_t)u32Disp;
13152 break;
13153 }
13154 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13155 }
13156
13157 }
13158
13159 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13160 *pGCPtrEff = u64EffAddr;
13161 else
13162 {
13163 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13164 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13165 }
13166 }
13167
13168 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13169 return VINF_SUCCESS;
13170}
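
/*
 * Worked example of the decoding above (illustrative only, the byte values
 * are made up and not taken from the code):
 *
 *    ModR/M = 0x84 -> mod=10b (disp32 follows), rm=100b (SIB byte follows)
 *    SIB    = 0x4B -> scale=01b (x2), index=001b (RCX), base=011b (RBX)
 *    disp32 = 0x10
 *
 *    64-bit effective address = RBX + RCX*2 + 0x10
 *
 * A REX.X or REX.B prefix would extend the index/base selection to R9/R11
 * respectively, which is what the uRexIndex/uRexB ORing above implements.
 */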
13171
13172
13173/**
13174 * Calculates the effective address of a ModR/M memory operand.
13175 *
13176 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13177 *
13178 * @return Strict VBox status code.
13179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13180 * @param bRm The ModRM byte.
13181 * @param cbImm The size of any immediate following the
13182 * effective address opcode bytes. Important for
13183 * RIP relative addressing.
13184 * @param pGCPtrEff Where to return the effective address.
13185 * @param offRsp Displacement added to RSP/ESP when it is the SIB base register.
13186 */
13187IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13188{
13189 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13190# define SET_SS_DEF() \
13191 do \
13192 { \
13193 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13194 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13195 } while (0)
13196
13197 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13198 {
13199/** @todo Check the effective address size crap! */
13200 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13201 {
13202 uint16_t u16EffAddr;
13203
13204 /* Handle the disp16 form with no registers first. */
13205 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13206 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13207 else
13208 {
13209 /* Get the displacement. */
13210 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13211 {
13212 case 0: u16EffAddr = 0; break;
13213 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13214 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13215 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13216 }
13217
13218 /* Add the base and index registers to the disp. */
13219 switch (bRm & X86_MODRM_RM_MASK)
13220 {
13221 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13222 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13223 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13224 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13225 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13226 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13227 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13228 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13229 }
13230 }
13231
13232 *pGCPtrEff = u16EffAddr;
13233 }
13234 else
13235 {
13236 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13237 uint32_t u32EffAddr;
13238
13239 /* Handle the disp32 form with no registers first. */
13240 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13241 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13242 else
13243 {
13244 /* Get the register (or SIB) value. */
13245 switch ((bRm & X86_MODRM_RM_MASK))
13246 {
13247 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13248 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13249 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13250 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13251 case 4: /* SIB */
13252 {
13253 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13254
13255 /* Get the index and scale it. */
13256 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13257 {
13258 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13259 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13260 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13261 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13262 case 4: u32EffAddr = 0; /* none */ break;
13263 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13264 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13265 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13266 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13267 }
13268 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13269
13270 /* add base */
13271 switch (bSib & X86_SIB_BASE_MASK)
13272 {
13273 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13274 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13275 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13276 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13277 case 4:
13278 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13279 SET_SS_DEF();
13280 break;
13281 case 5:
13282 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13283 {
13284 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13285 SET_SS_DEF();
13286 }
13287 else
13288 {
13289 uint32_t u32Disp;
13290 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13291 u32EffAddr += u32Disp;
13292 }
13293 break;
13294 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13295 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13297 }
13298 break;
13299 }
13300 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13301 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13302 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13303 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13304 }
13305
13306 /* Get and add the displacement. */
13307 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13308 {
13309 case 0:
13310 break;
13311 case 1:
13312 {
13313 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13314 u32EffAddr += i8Disp;
13315 break;
13316 }
13317 case 2:
13318 {
13319 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13320 u32EffAddr += u32Disp;
13321 break;
13322 }
13323 default:
13324 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13325 }
13326
13327 }
13328 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13329 *pGCPtrEff = u32EffAddr;
13330 else
13331 {
13332 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13333 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13334 }
13335 }
13336 }
13337 else
13338 {
13339 uint64_t u64EffAddr;
13340
13341 /* Handle the rip+disp32 form with no registers first. */
13342 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13343 {
13344 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13345 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13346 }
13347 else
13348 {
13349 /* Get the register (or SIB) value. */
13350 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13351 {
13352 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13353 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13354 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13355 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13356 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13357 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13358 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13359 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13360 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13361 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13362 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13363 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13364 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13365 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13366 /* SIB */
13367 case 4:
13368 case 12:
13369 {
13370 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13371
13372 /* Get the index and scale it. */
13373 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13374 {
13375 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13376 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13377 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13378 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13379 case 4: u64EffAddr = 0; /* none */ break;
13380 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13381 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13382 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13383 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13384 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13385 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13386 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13387 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13388 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13389 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13390 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13391 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13392 }
13393 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13394
13395 /* add base */
13396 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13397 {
13398 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13399 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13400 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13401 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13402 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13403 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13404 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13405 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13406 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13407 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13408 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13409 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13410 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13411 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13412 /* complicated encodings */
13413 case 5:
13414 case 13:
13415 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13416 {
13417 if (!pVCpu->iem.s.uRexB)
13418 {
13419 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13420 SET_SS_DEF();
13421 }
13422 else
13423 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13424 }
13425 else
13426 {
13427 uint32_t u32Disp;
13428 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13429 u64EffAddr += (int32_t)u32Disp;
13430 }
13431 break;
13432 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13433 }
13434 break;
13435 }
13436 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13437 }
13438
13439 /* Get and add the displacement. */
13440 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13441 {
13442 case 0:
13443 break;
13444 case 1:
13445 {
13446 int8_t i8Disp;
13447 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13448 u64EffAddr += i8Disp;
13449 break;
13450 }
13451 case 2:
13452 {
13453 uint32_t u32Disp;
13454 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13455 u64EffAddr += (int32_t)u32Disp;
13456 break;
13457 }
13458 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13459 }
13460
13461 }
13462
13463 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13464 *pGCPtrEff = u64EffAddr;
13465 else
13466 {
13467 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13468 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13469 }
13470 }
13471
13472 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13473 return VINF_SUCCESS;
13474}
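
/*
 * Illustrative example of the @a offRsp adjustment above (byte values chosen
 * for demonstration only): with ModR/M mod=01b rm=100b, SIB base=100b
 * (RSP/ESP), index=100b (none) and disp8=8, the effective address becomes
 * RSP + offRsp + 8.  This presumably lets the caller account for a stack
 * pointer change that has not yet been applied to the guest context.
 */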
13475
13476
13477#ifdef IEM_WITH_SETJMP
13478/**
13479 * Calculates the effective address of a ModR/M memory operand.
13480 *
13481 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13482 *
13483 * May longjmp on internal error.
13484 *
13485 * @return The effective address.
13486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13487 * @param bRm The ModRM byte.
13488 * @param cbImm The size of any immediate following the
13489 * effective address opcode bytes. Important for
13490 * RIP relative addressing.
13491 */
13492IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13493{
13494 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13495# define SET_SS_DEF() \
13496 do \
13497 { \
13498 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13499 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13500 } while (0)
13501
13502 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13503 {
13504/** @todo Check the effective address size crap! */
13505 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13506 {
13507 uint16_t u16EffAddr;
13508
13509 /* Handle the disp16 form with no registers first. */
13510 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13511 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13512 else
13513 {
13514 /* Get the displacement. */
13515 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13516 {
13517 case 0: u16EffAddr = 0; break;
13518 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13519 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13520 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13521 }
13522
13523 /* Add the base and index registers to the disp. */
13524 switch (bRm & X86_MODRM_RM_MASK)
13525 {
13526 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13527 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13528 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13529 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13530 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13531 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13532 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13533 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13534 }
13535 }
13536
13537 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13538 return u16EffAddr;
13539 }
13540
13541 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13542 uint32_t u32EffAddr;
13543
13544 /* Handle the disp32 form with no registers first. */
13545 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13546 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13547 else
13548 {
13549 /* Get the register (or SIB) value. */
13550 switch ((bRm & X86_MODRM_RM_MASK))
13551 {
13552 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13553 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13554 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13555 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13556 case 4: /* SIB */
13557 {
13558 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13559
13560 /* Get the index and scale it. */
13561 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13562 {
13563 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13564 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13565 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13566 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13567 case 4: u32EffAddr = 0; /* none */ break;
13568 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13569 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13570 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13571 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13572 }
13573 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13574
13575 /* add base */
13576 switch (bSib & X86_SIB_BASE_MASK)
13577 {
13578 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13579 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13580 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13581 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13582 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13583 case 5:
13584 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13585 {
13586 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13587 SET_SS_DEF();
13588 }
13589 else
13590 {
13591 uint32_t u32Disp;
13592 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13593 u32EffAddr += u32Disp;
13594 }
13595 break;
13596 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13597 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13598 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13599 }
13600 break;
13601 }
13602 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13603 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13604 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13605 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13606 }
13607
13608 /* Get and add the displacement. */
13609 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13610 {
13611 case 0:
13612 break;
13613 case 1:
13614 {
13615 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13616 u32EffAddr += i8Disp;
13617 break;
13618 }
13619 case 2:
13620 {
13621 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13622 u32EffAddr += u32Disp;
13623 break;
13624 }
13625 default:
13626 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13627 }
13628 }
13629
13630 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13631 {
13632 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13633 return u32EffAddr;
13634 }
13635 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13636 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13637 return u32EffAddr & UINT16_MAX;
13638 }
13639
13640 uint64_t u64EffAddr;
13641
13642 /* Handle the rip+disp32 form with no registers first. */
13643 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13644 {
13645 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13646 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13647 }
13648 else
13649 {
13650 /* Get the register (or SIB) value. */
13651 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13652 {
13653 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13654 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13655 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13656 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13657 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13658 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13659 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13660 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13661 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13662 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13663 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13664 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13665 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13666 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13667 /* SIB */
13668 case 4:
13669 case 12:
13670 {
13671 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13672
13673 /* Get the index and scale it. */
13674 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13675 {
13676 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13677 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13678 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13679 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13680 case 4: u64EffAddr = 0; /* none */ break;
13681 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13682 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13683 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13684 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13685 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13686 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13687 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13688 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13689 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13690 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13691 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13692 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13693 }
13694 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13695
13696 /* add base */
13697 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13698 {
13699 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13700 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13701 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13702 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13703 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13704 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13705 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13706 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13707 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13708 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13709 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13710 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13711 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13712 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13713 /* complicated encodings */
13714 case 5:
13715 case 13:
13716 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13717 {
13718 if (!pVCpu->iem.s.uRexB)
13719 {
13720 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13721 SET_SS_DEF();
13722 }
13723 else
13724 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13725 }
13726 else
13727 {
13728 uint32_t u32Disp;
13729 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13730 u64EffAddr += (int32_t)u32Disp;
13731 }
13732 break;
13733 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13734 }
13735 break;
13736 }
13737 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13738 }
13739
13740 /* Get and add the displacement. */
13741 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13742 {
13743 case 0:
13744 break;
13745 case 1:
13746 {
13747 int8_t i8Disp;
13748 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13749 u64EffAddr += i8Disp;
13750 break;
13751 }
13752 case 2:
13753 {
13754 uint32_t u32Disp;
13755 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13756 u64EffAddr += (int32_t)u32Disp;
13757 break;
13758 }
13759 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13760 }
13761
13762 }
13763
13764 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13765 {
13766 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13767 return u64EffAddr;
13768 }
13769 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13770 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13771 return u64EffAddr & UINT32_MAX;
13772}
13773#endif /* IEM_WITH_SETJMP */
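
/*
 * Note on the helpers above: the status-code returning variants signal
 * impossible encodings via IEM_NOT_REACHED_DEFAULT_CASE_RET(), whereas
 * iemOpHlpCalcRmEffAddrJmp returns the address directly and reports such
 * cases through IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX) or longjmp.
 */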
13774
13775/** @} */
13776
13777
13778
13779/*
13780 * Include the instructions
13781 */
13782#include "IEMAllInstructions.cpp.h"
13783
13784
13785
13786#ifdef LOG_ENABLED
13787/**
13788 * Logs the current instruction.
13789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13790 * @param fSameCtx Set if we have the same context information as the VMM,
13791 * clear if we may have already executed an instruction in
13792 * our debug context. When clear, we assume IEMCPU holds
13793 * valid CPU mode info.  Note that these days @a fSameCtx
13794 * mainly determines how the instruction is disassembled for
13795 * the log (current guest state vs. explicit CS:RIP and mode flags).
13796 * @param pszFunction The IEM function doing the execution.
13797 */
13798IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13799{
13800# ifdef IN_RING3
13801 if (LogIs2Enabled())
13802 {
13803 char szInstr[256];
13804 uint32_t cbInstr = 0;
13805 if (fSameCtx)
13806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13807 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13808 szInstr, sizeof(szInstr), &cbInstr);
13809 else
13810 {
13811 uint32_t fFlags = 0;
13812 switch (pVCpu->iem.s.enmCpuMode)
13813 {
13814 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13815 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13816 case IEMMODE_16BIT:
13817 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13818 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13819 else
13820 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13821 break;
13822 }
13823 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13824 szInstr, sizeof(szInstr), &cbInstr);
13825 }
13826
13827 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13828 Log2(("**** %s\n"
13829 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13830 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13831 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13832 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13833 " %s\n"
13834 , pszFunction,
13835 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13836 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13837 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13838 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13839 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13840 szInstr));
13841
13842 if (LogIs3Enabled())
13843 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13844 }
13845 else
13846# endif
13847 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13848 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13849 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13850}
13851#endif /* LOG_ENABLED */
13852
13853
13854/**
13855 * Makes status code adjustments (pass-up from I/O and access handlers)
13856 * and maintains statistics.
13857 *
13858 * @returns Strict VBox status code to pass up.
13859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13860 * @param rcStrict The status from executing an instruction.
13861 */
13862DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13863{
13864 if (rcStrict != VINF_SUCCESS)
13865 {
13866 if (RT_SUCCESS(rcStrict))
13867 {
13868 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13869 || rcStrict == VINF_IOM_R3_IOPORT_READ
13870 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13871 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13872 || rcStrict == VINF_IOM_R3_MMIO_READ
13873 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13874 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13875 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13876 || rcStrict == VINF_CPUM_R3_MSR_READ
13877 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13878 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13879 || rcStrict == VINF_EM_RAW_TO_R3
13880 || rcStrict == VINF_EM_TRIPLE_FAULT
13881 || rcStrict == VINF_GIM_R3_HYPERCALL
13882 /* raw-mode / virt handlers only: */
13883 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13884 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13885 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13886 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13887 || rcStrict == VINF_SELM_SYNC_GDT
13888 || rcStrict == VINF_CSAM_PENDING_ACTION
13889 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13890 /* nested hw.virt codes: */
13891 || rcStrict == VINF_VMX_VMEXIT
13892 || rcStrict == VINF_SVM_VMEXIT
13893 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13894/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13895 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13896#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13897 if ( rcStrict == VINF_VMX_VMEXIT
13898 && rcPassUp == VINF_SUCCESS)
13899 rcStrict = VINF_SUCCESS;
13900 else
13901#endif
13902#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13903 if ( rcStrict == VINF_SVM_VMEXIT
13904 && rcPassUp == VINF_SUCCESS)
13905 rcStrict = VINF_SUCCESS;
13906 else
13907#endif
13908 if (rcPassUp == VINF_SUCCESS)
13909 pVCpu->iem.s.cRetInfStatuses++;
13910 else if ( rcPassUp < VINF_EM_FIRST
13911 || rcPassUp > VINF_EM_LAST
13912 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13913 {
13914 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13915 pVCpu->iem.s.cRetPassUpStatus++;
13916 rcStrict = rcPassUp;
13917 }
13918 else
13919 {
13920 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13921 pVCpu->iem.s.cRetInfStatuses++;
13922 }
13923 }
13924 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13925 pVCpu->iem.s.cRetAspectNotImplemented++;
13926 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13927 pVCpu->iem.s.cRetInstrNotImplemented++;
13928 else
13929 pVCpu->iem.s.cRetErrStatuses++;
13930 }
13931 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13932 {
13933 pVCpu->iem.s.cRetPassUpStatus++;
13934 rcStrict = pVCpu->iem.s.rcPassUp;
13935 }
13936
13937 return rcStrict;
13938}
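
/*
 * Descriptive summary of the fiddling above: for informational rcStrict values
 * the recorded rcPassUp wins when it lies outside the VINF_EM range or has a
 * lower value than rcStrict; VINF_VMX_VMEXIT / VINF_SVM_VMEXIT are folded into
 * VINF_SUCCESS (in nested hw.virt builds) when there is nothing to pass up;
 * error statuses are only counted and returned unchanged.  When rcStrict is
 * VINF_SUCCESS, a pending rcPassUp is returned instead.
 */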
13939
13940
13941/**
13942 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13943 * IEMExecOneWithPrefetchedByPC.
13944 *
13945 * Similar code is found in IEMExecLots.
13946 *
13947 * @return Strict VBox status code.
13948 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13949 * @param fExecuteInhibit If set, execute the instruction following CLI,
13950 * POP SS and MOV SS,GR.
13951 * @param pszFunction The calling function name.
13952 */
13953DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13954{
13955 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13956 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13957 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13958 RT_NOREF_PV(pszFunction);
13959
13960#ifdef IEM_WITH_SETJMP
13961 VBOXSTRICTRC rcStrict;
13962 jmp_buf JmpBuf;
13963 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13964 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13965 if ((rcStrict = setjmp(JmpBuf)) == 0)
13966 {
13967 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13968 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13969 }
13970 else
13971 pVCpu->iem.s.cLongJumps++;
13972 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13973#else
13974 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13975 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13976#endif
13977 if (rcStrict == VINF_SUCCESS)
13978 pVCpu->iem.s.cInstructions++;
13979 if (pVCpu->iem.s.cActiveMappings > 0)
13980 {
13981 Assert(rcStrict != VINF_SUCCESS);
13982 iemMemRollback(pVCpu);
13983 }
13984 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13985 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13986 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13987
13988//#ifdef DEBUG
13989// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13990//#endif
13991
13992 /* Execute the next instruction as well if a cli, pop ss or
13993 mov ss, Gr has just completed successfully. */
13994 if ( fExecuteInhibit
13995 && rcStrict == VINF_SUCCESS
13996 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13997 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13998 {
13999 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14000 if (rcStrict == VINF_SUCCESS)
14001 {
14002#ifdef LOG_ENABLED
14003 iemLogCurInstr(pVCpu, false, pszFunction);
14004#endif
14005#ifdef IEM_WITH_SETJMP
14006 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14007 if ((rcStrict = setjmp(JmpBuf)) == 0)
14008 {
14009 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14010 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14011 }
14012 else
14013 pVCpu->iem.s.cLongJumps++;
14014 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14015#else
14016 IEM_OPCODE_GET_NEXT_U8(&b);
14017 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14018#endif
14019 if (rcStrict == VINF_SUCCESS)
14020 pVCpu->iem.s.cInstructions++;
14021 if (pVCpu->iem.s.cActiveMappings > 0)
14022 {
14023 Assert(rcStrict != VINF_SUCCESS);
14024 iemMemRollback(pVCpu);
14025 }
14026 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14027 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14028 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14029 }
14030 else if (pVCpu->iem.s.cActiveMappings > 0)
14031 iemMemRollback(pVCpu);
14032 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14033 }
14034
14035 /*
14036 * Return value fiddling, statistics and sanity assertions.
14037 */
14038 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14039
14040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14042 return rcStrict;
14043}
14044
14045
14046#ifdef IN_RC
14047/**
14048 * Re-enters raw-mode or ensure we return to ring-3.
14049 *
14050 * @returns rcStrict, maybe modified.
14051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14052 * @param rcStrict The status code returned by the interpreter.
14053 */
14054DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14055{
14056 if ( !pVCpu->iem.s.fInPatchCode
14057 && ( rcStrict == VINF_SUCCESS
14058 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14059 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14060 {
14061 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14062 CPUMRawEnter(pVCpu);
14063 else
14064 {
14065 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14066 rcStrict = VINF_EM_RESCHEDULE;
14067 }
14068 }
14069 return rcStrict;
14070}
14071#endif
14072
14073
14074/**
14075 * Execute one instruction.
14076 *
14077 * @return Strict VBox status code.
14078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14079 */
14080VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14081{
14082#ifdef LOG_ENABLED
14083 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14084#endif
14085
14086 /*
14087 * Do the decoding and emulation.
14088 */
14089 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14090 if (rcStrict == VINF_SUCCESS)
14091 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14092 else if (pVCpu->iem.s.cActiveMappings > 0)
14093 iemMemRollback(pVCpu);
14094
14095#ifdef IN_RC
14096 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14097#endif
14098 if (rcStrict != VINF_SUCCESS)
14099 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14100 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14101 return rcStrict;
14102}
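
#if 0 /* Illustrative sketch only: how an outer loop might drive IEMExecOne.
         The helper name and the iteration count are made up for demonstration. */
static VBOXSTRICTRC demoExecuteSomeInstructions(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    for (uint32_t i = 0; i < 16 && rcStrict == VINF_SUCCESS; i++)
        rcStrict = IEMExecOne(pVCpu); /* decodes + executes exactly one guest instruction */
    return rcStrict;
}
#endif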
14103
14104
14105VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14106{
14107 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14108
14109 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14110 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14111 if (rcStrict == VINF_SUCCESS)
14112 {
14113 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14114 if (pcbWritten)
14115 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14116 }
14117 else if (pVCpu->iem.s.cActiveMappings > 0)
14118 iemMemRollback(pVCpu);
14119
14120#ifdef IN_RC
14121 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14122#endif
14123 return rcStrict;
14124}
14125
14126
14127VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14128 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14129{
14130 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14131
14132 VBOXSTRICTRC rcStrict;
14133 if ( cbOpcodeBytes
14134 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14135 {
14136 iemInitDecoder(pVCpu, false);
14137#ifdef IEM_WITH_CODE_TLB
14138 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14139 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14140 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14141 pVCpu->iem.s.offCurInstrStart = 0;
14142 pVCpu->iem.s.offInstrNextByte = 0;
14143#else
14144 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14145 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14146#endif
14147 rcStrict = VINF_SUCCESS;
14148 }
14149 else
14150 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14151 if (rcStrict == VINF_SUCCESS)
14152 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14153 else if (pVCpu->iem.s.cActiveMappings > 0)
14154 iemMemRollback(pVCpu);
14155
14156#ifdef IN_RC
14157 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14158#endif
14159 return rcStrict;
14160}
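
#if 0 /* Illustrative sketch only: executing one instruction from opcode bytes the
         caller already fetched.  The helper name and opcode bytes are made up; the
         context core must be the one the assertion above demands. */
static VBOXSTRICTRC demoExecPrefetched(PVMCPU pVCpu)
{
    uint8_t const abOpcode[] = { 0x0f, 0xa2 }; /* cpuid */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, abOpcode, sizeof(abOpcode));
}
#endif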
14161
14162
14163VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14164{
14165 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14166
14167 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14168 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14169 if (rcStrict == VINF_SUCCESS)
14170 {
14171 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14172 if (pcbWritten)
14173 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14174 }
14175 else if (pVCpu->iem.s.cActiveMappings > 0)
14176 iemMemRollback(pVCpu);
14177
14178#ifdef IN_RC
14179 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14180#endif
14181 return rcStrict;
14182}
14183
14184
14185VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14186 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14187{
14188 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14189
14190 VBOXSTRICTRC rcStrict;
14191 if ( cbOpcodeBytes
14192 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14193 {
14194 iemInitDecoder(pVCpu, true);
14195#ifdef IEM_WITH_CODE_TLB
14196 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14197 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14198 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14199 pVCpu->iem.s.offCurInstrStart = 0;
14200 pVCpu->iem.s.offInstrNextByte = 0;
14201#else
14202 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14203 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14204#endif
14205 rcStrict = VINF_SUCCESS;
14206 }
14207 else
14208 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14209 if (rcStrict == VINF_SUCCESS)
14210 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14211 else if (pVCpu->iem.s.cActiveMappings > 0)
14212 iemMemRollback(pVCpu);
14213
14214#ifdef IN_RC
14215 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14216#endif
14217 return rcStrict;
14218}
14219
14220
14221/**
14222 * For debugging DISGetParamSize; may come in handy.
14223 *
14224 * @returns Strict VBox status code.
14225 * @param pVCpu The cross context virtual CPU structure of the
14226 * calling EMT.
14227 * @param pCtxCore The context core structure.
14228 * @param OpcodeBytesPC The PC of the opcode bytes.
14229 * @param pvOpcodeBytes Prefetched opcode bytes.
14230 * @param cbOpcodeBytes Number of prefetched bytes.
14231 * @param pcbWritten Where to return the number of bytes written.
14232 * Optional.
14233 */
14234VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14235 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14236 uint32_t *pcbWritten)
14237{
14238 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14239
14240 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14241 VBOXSTRICTRC rcStrict;
14242 if ( cbOpcodeBytes
14243 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14244 {
14245 iemInitDecoder(pVCpu, true);
14246#ifdef IEM_WITH_CODE_TLB
14247 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14248 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14249 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14250 pVCpu->iem.s.offCurInstrStart = 0;
14251 pVCpu->iem.s.offInstrNextByte = 0;
14252#else
14253 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14254 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14255#endif
14256 rcStrict = VINF_SUCCESS;
14257 }
14258 else
14259 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14260 if (rcStrict == VINF_SUCCESS)
14261 {
14262 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14263 if (pcbWritten)
14264 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14265 }
14266 else if (pVCpu->iem.s.cActiveMappings > 0)
14267 iemMemRollback(pVCpu);
14268
14269#ifdef IN_RC
14270 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14271#endif
14272 return rcStrict;
14273}
14274
14275
14276VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14277{
14278 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14279
14280 /*
14281 * See if there is an interrupt pending in TRPM, inject it if we can.
14282 */
14283 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14284#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14285 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14286 if (fIntrEnabled)
14287 {
14288 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14289 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14290 else
14291 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14292 }
14293#else
14294 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14295#endif
14296 if ( fIntrEnabled
14297 && TRPMHasTrap(pVCpu)
14298 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14299 {
14300 uint8_t u8TrapNo;
14301 TRPMEVENT enmType;
14302 RTGCUINT uErrCode;
14303 RTGCPTR uCr2;
14304 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14305 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14306 TRPMResetTrap(pVCpu);
14307 }
14308
14309 /*
14310 * Initial decoder init w/ prefetch, then setup setjmp.
14311 */
14312 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14313 if (rcStrict == VINF_SUCCESS)
14314 {
14315#ifdef IEM_WITH_SETJMP
14316 jmp_buf JmpBuf;
14317 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14318 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14319 pVCpu->iem.s.cActiveMappings = 0;
14320 if ((rcStrict = setjmp(JmpBuf)) == 0)
14321#endif
14322 {
14323 /*
14324 * The run loop. We limit ourselves to 4096 instructions right now.
14325 */
14326 PVM pVM = pVCpu->CTX_SUFF(pVM);
14327 uint32_t cInstr = 4096;
14328 for (;;)
14329 {
14330 /*
14331 * Log the state.
14332 */
14333#ifdef LOG_ENABLED
14334 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14335#endif
14336
14337 /*
14338 * Do the decoding and emulation.
14339 */
14340 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14341 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14342 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14343 {
14344 Assert(pVCpu->iem.s.cActiveMappings == 0);
14345 pVCpu->iem.s.cInstructions++;
14346 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14347 {
14348 uint64_t fCpu = pVCpu->fLocalForcedActions
14349 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14350 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14351 | VMCPU_FF_TLB_FLUSH
14352#ifdef VBOX_WITH_RAW_MODE
14353 | VMCPU_FF_TRPM_SYNC_IDT
14354 | VMCPU_FF_SELM_SYNC_TSS
14355 | VMCPU_FF_SELM_SYNC_GDT
14356 | VMCPU_FF_SELM_SYNC_LDT
14357#endif
14358 | VMCPU_FF_INHIBIT_INTERRUPTS
14359 | VMCPU_FF_BLOCK_NMIS
14360 | VMCPU_FF_UNHALT ));
14361
14362 if (RT_LIKELY( ( !fCpu
14363 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14364 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14365 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14366 {
14367 if (cInstr-- > 0)
14368 {
14369 Assert(pVCpu->iem.s.cActiveMappings == 0);
14370 iemReInitDecoder(pVCpu);
14371 continue;
14372 }
14373 }
14374 }
14375 Assert(pVCpu->iem.s.cActiveMappings == 0);
14376 }
14377 else if (pVCpu->iem.s.cActiveMappings > 0)
14378 iemMemRollback(pVCpu);
14379 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14380 break;
14381 }
14382 }
14383#ifdef IEM_WITH_SETJMP
14384 else
14385 {
14386 if (pVCpu->iem.s.cActiveMappings > 0)
14387 iemMemRollback(pVCpu);
14388 pVCpu->iem.s.cLongJumps++;
14389 }
14390 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14391#endif
14392
14393 /*
14394 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14395 */
14396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14398 }
14399 else
14400 {
14401 if (pVCpu->iem.s.cActiveMappings > 0)
14402 iemMemRollback(pVCpu);
14403
14404#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14405 /*
14406 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14407 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14408 */
14409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14410#endif
14411 }
14412
14413 /*
14414 * Maybe re-enter raw-mode and log.
14415 */
14416#ifdef IN_RC
14417 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14418#endif
14419 if (rcStrict != VINF_SUCCESS)
14420 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14421 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14422 if (pcInstructions)
14423 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14424 return rcStrict;
14425}
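
#if 0 /* Illustrative sketch only: a caller collecting the instruction count from
         IEMExecLots.  The helper name is made up for demonstration. */
static VBOXSTRICTRC demoRunABunch(PVMCPU pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("demoRunABunch: executed %u instructions, rc=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif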
14426
14427
14428/**
14429 * Interface used by EMExecuteExec, does exit statistics and limits.
14430 *
14431 * @returns Strict VBox status code.
14432 * @param pVCpu The cross context virtual CPU structure.
14433 * @param fWillExit To be defined.
14434 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14435 * @param cMaxInstructions Maximum number of instructions to execute.
14436 * @param cMaxInstructionsWithoutExits
14437 * The max number of instructions without exits.
14438 * @param pStats Where to return statistics.
14439 */
14440VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14441 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14442{
14443 NOREF(fWillExit); /** @todo define flexible exit crits */
14444
14445 /*
14446 * Initialize return stats.
14447 */
14448 pStats->cInstructions = 0;
14449 pStats->cExits = 0;
14450 pStats->cMaxExitDistance = 0;
14451 pStats->cReserved = 0;
14452
14453 /*
14454 * Initial decoder init w/ prefetch, then setup setjmp.
14455 */
14456 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14457 if (rcStrict == VINF_SUCCESS)
14458 {
14459#ifdef IEM_WITH_SETJMP
14460 jmp_buf JmpBuf;
14461 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14462 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14463 pVCpu->iem.s.cActiveMappings = 0;
14464 if ((rcStrict = setjmp(JmpBuf)) == 0)
14465#endif
14466 {
14467#ifdef IN_RING0
14468 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14469#endif
14470 uint32_t cInstructionSinceLastExit = 0;
14471
14472 /*
14473 * The run loop.  The limits here come from the cMaxInstructions and cMaxInstructionsWithoutExits parameters.
14474 */
14475 PVM pVM = pVCpu->CTX_SUFF(pVM);
14476 for (;;)
14477 {
14478 /*
14479 * Log the state.
14480 */
14481#ifdef LOG_ENABLED
14482 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14483#endif
14484
14485 /*
14486 * Do the decoding and emulation.
14487 */
14488 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14489
14490 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14491 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14492
14493 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14494 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14495 {
14496 pStats->cExits += 1;
14497 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14498 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14499 cInstructionSinceLastExit = 0;
14500 }
14501
14502 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14503 {
14504 Assert(pVCpu->iem.s.cActiveMappings == 0);
14505 pVCpu->iem.s.cInstructions++;
14506 pStats->cInstructions++;
14507 cInstructionSinceLastExit++;
14508 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14509 {
14510 uint64_t fCpu = pVCpu->fLocalForcedActions
14511 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14512 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14513 | VMCPU_FF_TLB_FLUSH
14514#ifdef VBOX_WITH_RAW_MODE
14515 | VMCPU_FF_TRPM_SYNC_IDT
14516 | VMCPU_FF_SELM_SYNC_TSS
14517 | VMCPU_FF_SELM_SYNC_GDT
14518 | VMCPU_FF_SELM_SYNC_LDT
14519#endif
14520 | VMCPU_FF_INHIBIT_INTERRUPTS
14521 | VMCPU_FF_BLOCK_NMIS
14522 | VMCPU_FF_UNHALT ));
14523
14524 if (RT_LIKELY( ( ( !fCpu
14525 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14526 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14527 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14528 || pStats->cInstructions < cMinInstructions))
14529 {
14530 if (pStats->cInstructions < cMaxInstructions)
14531 {
14532 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14533 {
14534#ifdef IN_RING0
14535 if ( !fCheckPreemptionPending
14536 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14537#endif
14538 {
14539 Assert(pVCpu->iem.s.cActiveMappings == 0);
14540 iemReInitDecoder(pVCpu);
14541 continue;
14542 }
14543#ifdef IN_RING0
14544 rcStrict = VINF_EM_RAW_INTERRUPT;
14545 break;
14546#endif
14547 }
14548 }
14549 }
14550 Assert(!(fCpu & VMCPU_FF_IEM));
14551 }
14552 Assert(pVCpu->iem.s.cActiveMappings == 0);
14553 }
14554 else if (pVCpu->iem.s.cActiveMappings > 0)
14555 iemMemRollback(pVCpu);
14556 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14557 break;
14558 }
14559 }
14560#ifdef IEM_WITH_SETJMP
14561 else
14562 {
14563 if (pVCpu->iem.s.cActiveMappings > 0)
14564 iemMemRollback(pVCpu);
14565 pVCpu->iem.s.cLongJumps++;
14566 }
14567 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14568#endif
14569
14570 /*
14571 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14572 */
14573 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14574 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14575 }
14576 else
14577 {
14578 if (pVCpu->iem.s.cActiveMappings > 0)
14579 iemMemRollback(pVCpu);
14580
14581#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14582 /*
14583 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14584 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14585 */
14586 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14587#endif
14588 }
14589
14590 /*
14591 * Maybe re-enter raw-mode and log.
14592 */
14593#ifdef IN_RC
14594 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14595#endif
14596 if (rcStrict != VINF_SUCCESS)
14597 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14598 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14599 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14600 return rcStrict;
14601}
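
#if 0 /* Illustrative sketch only: invoking IEMExecForExits and inspecting the
         statistics it returns.  The helper name and the limits are arbitrary demo
         values, and IEMEXECFOREXITSTATS is assumed to be the structure behind
         the PIEMEXECFOREXITSTATS parameter. */
static VBOXSTRICTRC demoExecForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/,
                                            1024 /*cMaxInstructions*/, 256 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("demoExecForExits: %u instructions, %u exits, max distance %u, rc=%Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif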
14602
14603
14604/**
14605 * Injects a trap, fault, abort, software interrupt or external interrupt.
14606 *
14607 * The parameter list matches TRPMQueryTrapAll pretty closely.
14608 *
14609 * @returns Strict VBox status code.
14610 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14611 * @param u8TrapNo The trap number.
14612 * @param enmType What type is it (trap/fault/abort), software
14613 * interrupt or hardware interrupt.
14614 * @param uErrCode The error code if applicable.
14615 * @param uCr2 The CR2 value if applicable.
14616 * @param cbInstr The instruction length (only relevant for
14617 * software interrupts).
14618 */
14619VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14620 uint8_t cbInstr)
14621{
14622 iemInitDecoder(pVCpu, false);
14623#ifdef DBGFTRACE_ENABLED
14624 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14625 u8TrapNo, enmType, uErrCode, uCr2);
14626#endif
14627
14628 uint32_t fFlags;
14629 switch (enmType)
14630 {
14631 case TRPM_HARDWARE_INT:
14632 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14633 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14634 uErrCode = uCr2 = 0;
14635 break;
14636
14637 case TRPM_SOFTWARE_INT:
14638 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14639 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14640 uErrCode = uCr2 = 0;
14641 break;
14642
14643 case TRPM_TRAP:
14644 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14645 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14646 if (u8TrapNo == X86_XCPT_PF)
14647 fFlags |= IEM_XCPT_FLAGS_CR2;
14648 switch (u8TrapNo)
14649 {
14650 case X86_XCPT_DF:
14651 case X86_XCPT_TS:
14652 case X86_XCPT_NP:
14653 case X86_XCPT_SS:
14654 case X86_XCPT_PF:
14655 case X86_XCPT_AC:
14656 fFlags |= IEM_XCPT_FLAGS_ERR;
14657 break;
14658
14659 case X86_XCPT_NMI:
14660 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14661 break;
14662 }
14663 break;
14664
14665 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14666 }
14667
14668 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14669
14670 if (pVCpu->iem.s.cActiveMappings > 0)
14671 iemMemRollback(pVCpu);
14672
14673 return rcStrict;
14674}
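
#if 0 /* Illustrative sketch only: injecting events through IEMInjectTrap.  The
         helper name is made up and vector 0x41 is chosen arbitrarily. */
static void demoInjectEvents(PVMCPU pVCpu, uint32_t uErrCode, RTGCPTR uCr2)
{
    /* External hardware interrupt - the error code and CR2 arguments are ignored. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x41, TRPM_HARDWARE_INT, 0, 0, 0 /*cbInstr*/);
    NOREF(rcStrict);

    /* Page fault - needs the error code and the faulting address (CR2). */
    rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    NOREF(rcStrict);
}
#endif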
14675
14676
14677/**
14678 * Injects the active TRPM event.
14679 *
14680 * @returns Strict VBox status code.
14681 * @param pVCpu The cross context virtual CPU structure.
14682 */
14683VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14684{
14685#ifndef IEM_IMPLEMENTS_TASKSWITCH
14686 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14687#else
14688 uint8_t u8TrapNo;
14689 TRPMEVENT enmType;
14690 RTGCUINT uErrCode;
14691 RTGCUINTPTR uCr2;
14692 uint8_t cbInstr;
14693 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14694 if (RT_FAILURE(rc))
14695 return rc;
14696
14697 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14698# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14699 if (rcStrict == VINF_SVM_VMEXIT)
14700 rcStrict = VINF_SUCCESS;
14701# endif
14702
14703 /** @todo Are there any other codes that imply the event was successfully
14704 * delivered to the guest? See @bugref{6607}. */
14705 if ( rcStrict == VINF_SUCCESS
14706 || rcStrict == VINF_IEM_RAISED_XCPT)
14707 TRPMResetTrap(pVCpu);
14708
14709 return rcStrict;
14710#endif
14711}
14712
14713
14714VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14715{
14716 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14717 return VERR_NOT_IMPLEMENTED;
14718}
14719
14720
14721VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14722{
14723 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14724 return VERR_NOT_IMPLEMENTED;
14725}
14726
14727
14728#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14729/**
14730 * Executes an IRET instruction with default operand size.
14731 *
14732 * This is for PATM.
14733 *
14734 * @returns VBox status code.
14735 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14736 * @param pCtxCore The register frame.
14737 */
14738VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14739{
14740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14741
14742 iemCtxCoreToCtx(pCtx, pCtxCore);
14743 iemInitDecoder(pVCpu);
14744 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14745 if (rcStrict == VINF_SUCCESS)
14746 iemCtxToCtxCore(pCtxCore, pCtx);
14747 else
14748 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14749 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14750 return rcStrict;
14751}
14752#endif
14753
14754
14755/**
14756 * Macro used by the IEMExec* method to check the given instruction length.
14757 *
14758 * Will return on failure!
14759 *
14760 * @param a_cbInstr The given instruction length.
14761 * @param a_cbMin The minimum length.
14762 */
14763#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14764 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14765 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14766
14767
14768/**
14769 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14770 *
14771 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14772 *
14773 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14775 * @param rcStrict The status code to fiddle.
14776 */
14777DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14778{
14779 iemUninitExec(pVCpu);
14780#ifdef IN_RC
14781 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14782#else
14783 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14784#endif
14785}
14786
14787
14788/**
14789 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14790 *
14791 * This API ASSUMES that the caller has already verified that the guest code is
14792 * allowed to access the I/O port. (The I/O port is in the DX register in the
14793 * guest state.)
14794 *
14795 * @returns Strict VBox status code.
14796 * @param pVCpu The cross context virtual CPU structure.
14797 * @param cbValue The size of the I/O port access (1, 2, or 4).
14798 * @param enmAddrMode The addressing mode.
14799 * @param fRepPrefix Indicates whether a repeat prefix is used
14800 * (doesn't matter which for this instruction).
14801 * @param cbInstr The instruction length in bytes.
14802 * @param iEffSeg The effective segment address.
14803 * @param fIoChecked Whether the access to the I/O port has been
14804 * checked or not. It's typically checked in the
14805 * HM scenario.
14806 */
14807VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14808 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14809{
14810 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14811 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14812
14813 /*
14814 * State init.
14815 */
14816 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14817
14818 /*
14819 * Switch orgy for getting to the right handler.
14820 */
14821 VBOXSTRICTRC rcStrict;
14822 if (fRepPrefix)
14823 {
14824 switch (enmAddrMode)
14825 {
14826 case IEMMODE_16BIT:
14827 switch (cbValue)
14828 {
14829 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14830 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14831 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14832 default:
14833 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14834 }
14835 break;
14836
14837 case IEMMODE_32BIT:
14838 switch (cbValue)
14839 {
14840 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14841 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14842 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14843 default:
14844 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14845 }
14846 break;
14847
14848 case IEMMODE_64BIT:
14849 switch (cbValue)
14850 {
14851 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14852 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14853 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14854 default:
14855 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14856 }
14857 break;
14858
14859 default:
14860 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14861 }
14862 }
14863 else
14864 {
14865 switch (enmAddrMode)
14866 {
14867 case IEMMODE_16BIT:
14868 switch (cbValue)
14869 {
14870 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14871 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14872 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14873 default:
14874 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14875 }
14876 break;
14877
14878 case IEMMODE_32BIT:
14879 switch (cbValue)
14880 {
14881 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14882 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14883 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14884 default:
14885 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14886 }
14887 break;
14888
14889 case IEMMODE_64BIT:
14890 switch (cbValue)
14891 {
14892 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14893 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14894 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14895 default:
14896 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14897 }
14898 break;
14899
14900 default:
14901 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14902 }
14903 }
14904
14905 if (pVCpu->iem.s.cActiveMappings)
14906 iemMemRollback(pVCpu);
14907
14908 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14909}
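
/*
 * A minimal caller sketch (illustrative only, not an actual VBox function):
 * a REP OUTSB handled on behalf of an HM I/O exit, assuming the port access
 * has already been validated and the instruction length decoded.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC SampleHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized REP OUTS with 32-bit addressing and the default DS segment. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif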
14910
14911
14912/**
14913 * Interface for HM and EM for executing string I/O IN (read) instructions.
14914 *
14915 * This API ASSUMES that the caller has already verified that the guest code is
14916 * allowed to access the I/O port. (The I/O port is in the DX register in the
14917 * guest state.)
14918 *
14919 * @returns Strict VBox status code.
14920 * @param pVCpu The cross context virtual CPU structure.
14921 * @param cbValue The size of the I/O port access (1, 2, or 4).
14922 * @param enmAddrMode The addressing mode.
14923 * @param fRepPrefix Indicates whether a repeat prefix is used
14924 * (doesn't matter which for this instruction).
14925 * @param cbInstr The instruction length in bytes.
14926 * @param fIoChecked Whether the access to the I/O port has been
14927 * checked or not. It's typically checked in the
14928 * HM scenario.
14929 */
14930VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14931 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14932{
14933 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14934
14935 /*
14936 * State init.
14937 */
14938 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14939
14940 /*
14941 * Switch orgy for getting to the right handler.
14942 */
14943 VBOXSTRICTRC rcStrict;
14944 if (fRepPrefix)
14945 {
14946 switch (enmAddrMode)
14947 {
14948 case IEMMODE_16BIT:
14949 switch (cbValue)
14950 {
14951 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14952 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14953 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14954 default:
14955 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14956 }
14957 break;
14958
14959 case IEMMODE_32BIT:
14960 switch (cbValue)
14961 {
14962 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14963 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14964 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14965 default:
14966 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14967 }
14968 break;
14969
14970 case IEMMODE_64BIT:
14971 switch (cbValue)
14972 {
14973 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14974 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14975 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14976 default:
14977 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14978 }
14979 break;
14980
14981 default:
14982 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14983 }
14984 }
14985 else
14986 {
14987 switch (enmAddrMode)
14988 {
14989 case IEMMODE_16BIT:
14990 switch (cbValue)
14991 {
14992 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14993 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14994 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14995 default:
14996 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14997 }
14998 break;
14999
15000 case IEMMODE_32BIT:
15001 switch (cbValue)
15002 {
15003 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15004 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15005 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15006 default:
15007 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15008 }
15009 break;
15010
15011 case IEMMODE_64BIT:
15012 switch (cbValue)
15013 {
15014 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15015 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15016 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15017 default:
15018 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15019 }
15020 break;
15021
15022 default:
15023 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15024 }
15025 }
15026
15027 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15028 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15029}
15030
15031
15032/**
15033 * Interface for raw-mode to execute an OUT (write) instruction.
15034 *
15035 * @returns Strict VBox status code.
15036 * @param pVCpu The cross context virtual CPU structure.
15037 * @param cbInstr The instruction length in bytes.
15038 * @param u16Port The port to write to.
15039 * @param fImm Whether the port is specified using an immediate operand or
15040 * using the implicit DX register.
15041 * @param cbReg The register size.
15042 *
15043 * @remarks In ring-0 not all of the state needs to be synced in.
15044 */
15045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15046{
15047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15048 Assert(cbReg <= 4 && cbReg != 3);
15049
15050 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15051 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15052 Assert(!pVCpu->iem.s.cActiveMappings);
15053 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15054}
15055
15056
15057/**
15058 * Interface for raw-mode to execute an IN (read) instruction.
15059 *
15060 * @returns Strict VBox status code.
15061 * @param pVCpu The cross context virtual CPU structure.
15062 * @param cbInstr The instruction length in bytes.
15063 * @param u16Port The port to read.
15064 * @param fImm Whether the port is specified using an immediate operand or
15065 * using the implicit DX.
15066 * @param cbReg The register size.
15067 */
15068VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15069{
15070 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15071 Assert(cbReg <= 4 && cbReg != 3);
15072
15073 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15074 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15075 Assert(!pVCpu->iem.s.cActiveMappings);
15076 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15077}
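
/*
 * A minimal caller sketch (illustrative only): executing a decoded IN AL, DX
 * on behalf of a raw-mode/HM exit, with the port value supplied by the caller.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC SampleExecInAlDx(PVMCPU pVCpu, uint16_t u16Port)
{
    /* 1-byte opcode (0xEC), 1-byte register, port taken from DX (fImm=false). */
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif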
15078
15079
15080/**
15081 * Interface for HM and EM to write to a CRx register.
15082 *
15083 * @returns Strict VBox status code.
15084 * @param pVCpu The cross context virtual CPU structure.
15085 * @param cbInstr The instruction length in bytes.
15086 * @param iCrReg The control register number (destination).
15087 * @param iGReg The general purpose register number (source).
15088 *
15089 * @remarks In ring-0 not all of the state needs to be synced in.
15090 */
15091VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15092{
15093 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15094 Assert(iCrReg < 16);
15095 Assert(iGReg < 16);
15096
15097 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15098 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15099 Assert(!pVCpu->iem.s.cActiveMappings);
15100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15101}
15102
15103
15104/**
15105 * Interface for HM and EM to read from a CRx register.
15106 *
15107 * @returns Strict VBox status code.
15108 * @param pVCpu The cross context virtual CPU structure.
15109 * @param cbInstr The instruction length in bytes.
15110 * @param iGReg The general purpose register number (destination).
15111 * @param iCrReg The control register number (source).
15112 *
15113 * @remarks In ring-0 not all of the state needs to be synced in.
15114 */
15115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15116{
15117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15118 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15119 | CPUMCTX_EXTRN_APIC_TPR);
15120 Assert(iCrReg < 16);
15121 Assert(iGReg < 16);
15122
15123 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15124 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15125 Assert(!pVCpu->iem.s.cActiveMappings);
15126 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15127}
15128
15129
15130/**
15131 * Interface for HM and EM to clear the CR0[TS] bit.
15132 *
15133 * @returns Strict VBox status code.
15134 * @param pVCpu The cross context virtual CPU structure.
15135 * @param cbInstr The instruction length in bytes.
15136 *
15137 * @remarks In ring-0 not all of the state needs to be synced in.
15138 */
15139VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15140{
15141 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15142
15143 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15144 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15145 Assert(!pVCpu->iem.s.cActiveMappings);
15146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15147}
15148
15149
15150/**
15151 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15152 *
15153 * @returns Strict VBox status code.
15154 * @param pVCpu The cross context virtual CPU structure.
15155 * @param cbInstr The instruction length in bytes.
15156 * @param uValue The value to load into CR0.
15157 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15158 * memory operand. Otherwise pass NIL_RTGCPTR.
15159 *
15160 * @remarks In ring-0 not all of the state needs to be synced in.
15161 */
15162VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15163{
15164 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15165
15166 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15167 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15168 Assert(!pVCpu->iem.s.cActiveMappings);
15169 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15170}
15171
15172
15173/**
15174 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15175 *
15176 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15177 *
15178 * @returns Strict VBox status code.
15179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15180 * @param cbInstr The instruction length in bytes.
15181 * @remarks In ring-0 not all of the state needs to be synced in.
15182 * @thread EMT(pVCpu)
15183 */
15184VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15185{
15186 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15187
15188 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15189 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15190 Assert(!pVCpu->iem.s.cActiveMappings);
15191 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15192}
15193
15194
15195/**
15196 * Interface for HM and EM to emulate the WBINVD instruction.
15197 *
15198 * @returns Strict VBox status code.
15199 * @param pVCpu The cross context virtual CPU structure.
15200 * @param cbInstr The instruction length in bytes.
15201 *
15202 * @remarks In ring-0 not all of the state needs to be synced in.
15203 */
15204VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15205{
15206 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15207
15208 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15210 Assert(!pVCpu->iem.s.cActiveMappings);
15211 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15212}
15213
15214
15215/**
15216 * Interface for HM and EM to emulate the INVD instruction.
15217 *
15218 * @returns Strict VBox status code.
15219 * @param pVCpu The cross context virtual CPU structure.
15220 * @param cbInstr The instruction length in bytes.
15221 *
15222 * @remarks In ring-0 not all of the state needs to be synced in.
15223 */
15224VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15225{
15226 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15227
15228 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15229 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15230 Assert(!pVCpu->iem.s.cActiveMappings);
15231 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15232}
15233
15234
15235/**
15236 * Interface for HM and EM to emulate the INVLPG instruction.
15237 *
15238 * @returns Strict VBox status code.
15239 * @retval VINF_PGM_SYNC_CR3
15240 *
15241 * @param pVCpu The cross context virtual CPU structure.
15242 * @param cbInstr The instruction length in bytes.
15243 * @param GCPtrPage The effective address of the page to invalidate.
15244 *
15245 * @remarks In ring-0 not all of the state needs to be synced in.
15246 */
15247VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15248{
15249 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15250
15251 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15252 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15253 Assert(!pVCpu->iem.s.cActiveMappings);
15254 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15255}
15256
15257
15258/**
15259 * Interface for HM and EM to emulate the CPUID instruction.
15260 *
15261 * @returns Strict VBox status code.
15262 *
15263 * @param pVCpu The cross context virtual CPU structure.
15264 * @param cbInstr The instruction length in bytes.
15265 *
15266 * @remarks Not all of the state needs to be synced in: the usual set plus RAX and RCX.
15267 */
15268VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15269{
15270 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15271 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15272
15273 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15274 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15275 Assert(!pVCpu->iem.s.cActiveMappings);
15276 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15277}
15278
15279
15280/**
15281 * Interface for HM and EM to emulate the RDPMC instruction.
15282 *
15283 * @returns Strict VBox status code.
15284 *
15285 * @param pVCpu The cross context virtual CPU structure.
15286 * @param cbInstr The instruction length in bytes.
15287 *
15288 * @remarks Not all of the state needs to be synced in.
15289 */
15290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15291{
15292 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15293 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15294
15295 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15296 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15297 Assert(!pVCpu->iem.s.cActiveMappings);
15298 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15299}
15300
15301
15302/**
15303 * Interface for HM and EM to emulate the RDTSC instruction.
15304 *
15305 * @returns Strict VBox status code.
15306 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15307 *
15308 * @param pVCpu The cross context virtual CPU structure.
15309 * @param cbInstr The instruction length in bytes.
15310 *
15311 * @remarks Not all of the state needs to be synced in.
15312 */
15313VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15314{
15315 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15316 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15317
15318 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15319 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15320 Assert(!pVCpu->iem.s.cActiveMappings);
15321 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15322}
15323
15324
15325/**
15326 * Interface for HM and EM to emulate the RDTSCP instruction.
15327 *
15328 * @returns Strict VBox status code.
15329 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15330 *
15331 * @param pVCpu The cross context virtual CPU structure.
15332 * @param cbInstr The instruction length in bytes.
15333 *
15334 * @remarks Not all of the state needs to be synced in. Recommended
15335 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15336 */
15337VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15338{
15339 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15340 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15341
15342 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15343 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15344 Assert(!pVCpu->iem.s.cActiveMappings);
15345 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15346}
15347
15348
15349/**
15350 * Interface for HM and EM to emulate the RDMSR instruction.
15351 *
15352 * @returns Strict VBox status code.
15353 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15354 *
15355 * @param pVCpu The cross context virtual CPU structure.
15356 * @param cbInstr The instruction length in bytes.
15357 *
15358 * @remarks Not all of the state needs to be synced in. Requires RCX and
15359 * (currently) all MSRs.
15360 */
15361VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15362{
15363 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15364 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15365
15366 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15367 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15368 Assert(!pVCpu->iem.s.cActiveMappings);
15369 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15370}
15371
15372
15373/**
15374 * Interface for HM and EM to emulate the WRMSR instruction.
15375 *
15376 * @returns Strict VBox status code.
15377 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15378 *
15379 * @param pVCpu The cross context virtual CPU structure.
15380 * @param cbInstr The instruction length in bytes.
15381 *
15382 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15383 * and (currently) all MSRs.
15384 */
15385VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15386{
15387 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15388 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15389 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15390
15391 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15392 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15393 Assert(!pVCpu->iem.s.cActiveMappings);
15394 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15395}
15396
15397
15398/**
15399 * Interface for HM and EM to emulate the MONITOR instruction.
15400 *
15401 * @returns Strict VBox status code.
15402 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15403 *
15404 * @param pVCpu The cross context virtual CPU structure.
15405 * @param cbInstr The instruction length in bytes.
15406 *
15407 * @remarks Not all of the state needs to be synced in.
15408 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15409 * are used.
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15414 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15415
15416 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15417 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15418 Assert(!pVCpu->iem.s.cActiveMappings);
15419 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15420}
15421
15422
15423/**
15424 * Interface for HM and EM to emulate the MWAIT instruction.
15425 *
15426 * @returns Strict VBox status code.
15427 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15428 *
15429 * @param pVCpu The cross context virtual CPU structure.
15430 * @param cbInstr The instruction length in bytes.
15431 *
15432 * @remarks Not all of the state needs to be synced in.
15433 */
15434VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15435{
15436 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the HLT instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15450 *
15451 * @param pVCpu The cross context virtual CPU structure.
15452 * @param cbInstr The instruction length in bytes.
15453 *
15454 * @remarks Not all of the state needs to be synced in.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15459
15460 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15461 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15462 Assert(!pVCpu->iem.s.cActiveMappings);
15463 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15464}
15465
15466
15467/**
15468 * Checks if IEM is in the process of delivering an event (interrupt or
15469 * exception).
15470 *
15471 * @returns true if we're in the process of raising an interrupt or exception,
15472 * false otherwise.
15473 * @param pVCpu The cross context virtual CPU structure.
15474 * @param puVector Where to store the vector associated with the
15475 * currently delivered event, optional.
15476 * @param pfFlags Where to store the event delivery flags (see
15477 * IEM_XCPT_FLAGS_XXX), optional.
15478 * @param puErr Where to store the error code associated with the
15479 * event, optional.
15480 * @param puCr2 Where to store the CR2 associated with the event,
15481 * optional.
15482 * @remarks The caller should check the flags to determine if the error code and
15483 * CR2 are valid for the event.
15484 */
15485VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15486{
15487 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15488 if (fRaisingXcpt)
15489 {
15490 if (puVector)
15491 *puVector = pVCpu->iem.s.uCurXcpt;
15492 if (pfFlags)
15493 *pfFlags = pVCpu->iem.s.fCurXcpt;
15494 if (puErr)
15495 *puErr = pVCpu->iem.s.uCurXcptErr;
15496 if (puCr2)
15497 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15498 }
15499 return fRaisingXcpt;
15500}
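
/*
 * A minimal usage sketch (illustrative only): querying the event currently
 * being delivered and checking whether its error code is valid.
 */
#if 0 /* illustrative only */
static void SampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x (valid=%RTbool) uCr2=%#RX64\n",
             uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR), uCr2));
}
#endif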
15501
15502#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15503
15504/**
15505 * Interface for HM and EM to emulate the CLGI instruction.
15506 *
15507 * @returns Strict VBox status code.
15508 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15509 * @param cbInstr The instruction length in bytes.
15510 * @thread EMT(pVCpu)
15511 */
15512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15513{
15514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15515
15516 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15517 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15518 Assert(!pVCpu->iem.s.cActiveMappings);
15519 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15520}
15521
15522
15523/**
15524 * Interface for HM and EM to emulate the STGI instruction.
15525 *
15526 * @returns Strict VBox status code.
15527 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15528 * @param cbInstr The instruction length in bytes.
15529 * @thread EMT(pVCpu)
15530 */
15531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15532{
15533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15534
15535 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15537 Assert(!pVCpu->iem.s.cActiveMappings);
15538 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15539}
15540
15541
15542/**
15543 * Interface for HM and EM to emulate the VMLOAD instruction.
15544 *
15545 * @returns Strict VBox status code.
15546 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15547 * @param cbInstr The instruction length in bytes.
15548 * @thread EMT(pVCpu)
15549 */
15550VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15551{
15552 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15553
15554 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15555 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15556 Assert(!pVCpu->iem.s.cActiveMappings);
15557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15558}
15559
15560
15561/**
15562 * Interface for HM and EM to emulate the VMSAVE instruction.
15563 *
15564 * @returns Strict VBox status code.
15565 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15566 * @param cbInstr The instruction length in bytes.
15567 * @thread EMT(pVCpu)
15568 */
15569VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15570{
15571 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15572
15573 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15575 Assert(!pVCpu->iem.s.cActiveMappings);
15576 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15577}
15578
15579
15580/**
15581 * Interface for HM and EM to emulate the INVLPGA instruction.
15582 *
15583 * @returns Strict VBox status code.
15584 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15585 * @param cbInstr The instruction length in bytes.
15586 * @thread EMT(pVCpu)
15587 */
15588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15589{
15590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15591
15592 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15593 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15594 Assert(!pVCpu->iem.s.cActiveMappings);
15595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15596}
15597
15598
15599/**
15600 * Interface for HM and EM to emulate the VMRUN instruction.
15601 *
15602 * @returns Strict VBox status code.
15603 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15604 * @param cbInstr The instruction length in bytes.
15605 * @thread EMT(pVCpu)
15606 */
15607VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15608{
15609 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15610 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15611
15612 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15613 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15614 Assert(!pVCpu->iem.s.cActiveMappings);
15615 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15616}
15617
15618
15619/**
15620 * Interface for HM and EM to emulate \#VMEXIT.
15621 *
15622 * @returns Strict VBox status code.
15623 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15624 * @param uExitCode The exit code.
15625 * @param uExitInfo1 The exit info. 1 field.
15626 * @param uExitInfo2 The exit info. 2 field.
15627 * @thread EMT(pVCpu)
15628 */
15629VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15630{
15631 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15632 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15633 if (pVCpu->iem.s.cActiveMappings)
15634 iemMemRollback(pVCpu);
15635 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15636}
15637
15638#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15639
15640#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15641
15642/**
15643 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15644 *
15645 * @returns Strict VBox status code.
15646 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15647 * @param uVector The external interrupt vector.
15648 * @param fIntPending Whether the external interrupt is pending or
15649 * acknowledged in the interrupt controller.
15650 * @thread EMT(pVCpu)
15651 */
15652VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15653{
15654 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15655 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15656 if (pVCpu->iem.s.cActiveMappings)
15657 iemMemRollback(pVCpu);
15658 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15659}
15660
15661
15662/**
15663 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15664 *
15665 * @returns Strict VBox status code.
15666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15669 *
15670 * @thread EMT(pVCpu)
15671 */
15672VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15673{
15674 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15675 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15676 if (pVCpu->iem.s.cActiveMappings)
15677 iemMemRollback(pVCpu);
15678 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15679}
15680
15681
15682/**
15683 * Interface for HM and EM to emulate the VMREAD instruction.
15684 *
15685 * @returns Strict VBox status code.
15686 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15687 * @param pExitInfo Pointer to the VM-exit information struct.
15688 * @thread EMT(pVCpu)
15689 */
15690VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15691{
15692 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15693 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15694 Assert(pExitInfo);
15695
15696 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15697
15698 VBOXSTRICTRC rcStrict;
15699 uint8_t const cbInstr = pExitInfo->cbInstr;
15700 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15701 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15702 {
15703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15704 {
15705 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15706 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15707 }
15708 else
15709 {
15710 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15711 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15712 }
15713 }
15714 else
15715 {
15716 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15717 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15718 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15719 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15720 }
15721 if (pVCpu->iem.s.cActiveMappings)
15722 iemMemRollback(pVCpu);
15723 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15724}
15725
15726
15727/**
15728 * Interface for HM and EM to emulate the VMWRITE instruction.
15729 *
15730 * @returns Strict VBox status code.
15731 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15732 * @param pExitInfo Pointer to the VM-exit information struct.
15733 * @thread EMT(pVCpu)
15734 */
15735VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15736{
15737 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15738 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15739 Assert(pExitInfo);
15740
15741 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15742
15743 uint64_t u64Val;
15744 uint8_t iEffSeg;
15745 IEMMODE enmEffAddrMode;
15746 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15747 {
15748 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15749 iEffSeg = UINT8_MAX;
15750 enmEffAddrMode = UINT8_MAX;
15751 }
15752 else
15753 {
15754 u64Val = pExitInfo->GCPtrEffAddr;
15755 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15756 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15757 }
15758 uint8_t const cbInstr = pExitInfo->cbInstr;
15759 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15760 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15761 if (pVCpu->iem.s.cActiveMappings)
15762 iemMemRollback(pVCpu);
15763 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15764}
15765
15766
15767/**
15768 * Interface for HM and EM to emulate the VMPTRLD instruction.
15769 *
15770 * @returns Strict VBox status code.
15771 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15772 * @param pExitInfo Pointer to the VM-exit information struct.
15773 * @thread EMT(pVCpu)
15774 */
15775VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15776{
15777 Assert(pExitInfo);
15778 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15779 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15780
15781 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15782
15783 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15784 uint8_t const cbInstr = pExitInfo->cbInstr;
15785 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15786 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15787 if (pVCpu->iem.s.cActiveMappings)
15788 iemMemRollback(pVCpu);
15789 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15790}
15791
15792
15793/**
15794 * Interface for HM and EM to emulate the VMPTRST instruction.
15795 *
15796 * @returns Strict VBox status code.
15797 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15798 * @param pExitInfo Pointer to the VM-exit information struct.
15799 * @thread EMT(pVCpu)
15800 */
15801VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15802{
15803 Assert(pExitInfo);
15804 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15805 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15806
15807 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15808
15809 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15810 uint8_t const cbInstr = pExitInfo->cbInstr;
15811 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15812 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15813 if (pVCpu->iem.s.cActiveMappings)
15814 iemMemRollback(pVCpu);
15815 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15816}
15817
15818
15819/**
15820 * Interface for HM and EM to emulate the VMCLEAR instruction.
15821 *
15822 * @returns Strict VBox status code.
15823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15824 * @param pExitInfo Pointer to the VM-exit information struct.
15825 * @thread EMT(pVCpu)
15826 */
15827VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15828{
15829 Assert(pExitInfo);
15830 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15831 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15832
15833 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15834
15835 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15836 uint8_t const cbInstr = pExitInfo->cbInstr;
15837 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15838 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15839 if (pVCpu->iem.s.cActiveMappings)
15840 iemMemRollback(pVCpu);
15841 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15842}
15843
15844
15845/**
15846 * Interface for HM and EM to emulate the VMXON instruction.
15847 *
15848 * @returns Strict VBox status code.
15849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15850 * @param pExitInfo Pointer to the VM-exit information struct.
15851 * @thread EMT(pVCpu)
15852 */
15853VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15854{
15855 Assert(pExitInfo);
15856 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15857 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15858
15859 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15860
15861 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15862 uint8_t const cbInstr = pExitInfo->cbInstr;
15863 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15864 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15865 if (pVCpu->iem.s.cActiveMappings)
15866 iemMemRollback(pVCpu);
15867 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15868}
15869
15870
15871/**
15872 * Interface for HM and EM to emulate the VMXOFF instruction.
15873 *
15874 * @returns Strict VBox status code.
15875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15876 * @param cbInstr The instruction length in bytes.
15877 * @thread EMT(pVCpu)
15878 */
15879VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15880{
15881 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15882 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HM_VMX_MASK);
15883
15884 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15885 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15886 Assert(!pVCpu->iem.s.cActiveMappings);
15887 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15888}
15889
15890#endif
15891
15892#ifdef IN_RING3
15893
15894/**
15895 * Handles the unlikely and probably fatal merge cases.
15896 *
15897 * @returns Merged status code.
15898 * @param rcStrict Current EM status code.
15899 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15900 * with @a rcStrict.
15901 * @param iMemMap The memory mapping index. For error reporting only.
15902 * @param pVCpu The cross context virtual CPU structure of the calling
15903 * thread, for error reporting only.
15904 */
15905DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15906 unsigned iMemMap, PVMCPU pVCpu)
15907{
15908 if (RT_FAILURE_NP(rcStrict))
15909 return rcStrict;
15910
15911 if (RT_FAILURE_NP(rcStrictCommit))
15912 return rcStrictCommit;
15913
15914 if (rcStrict == rcStrictCommit)
15915 return rcStrictCommit;
15916
15917 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15918 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15920 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15922 return VERR_IOM_FF_STATUS_IPE;
15923}
15924
15925
15926/**
15927 * Helper for IOMR3ProcessForceFlag.
15928 *
15929 * @returns Merged status code.
15930 * @param rcStrict Current EM status code.
15931 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15932 * with @a rcStrict.
15933 * @param iMemMap The memory mapping index. For error reporting only.
15934 * @param pVCpu The cross context virtual CPU structure of the calling
15935 * thread, for error reporting only.
15936 */
15937DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15938{
15939 /* Simple. */
15940 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15941 return rcStrictCommit;
15942
15943 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15944 return rcStrict;
15945
15946 /* EM scheduling status codes. */
15947 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15948 && rcStrict <= VINF_EM_LAST))
15949 {
15950 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15951 && rcStrictCommit <= VINF_EM_LAST))
15952 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15953 }
15954
15955 /* Unlikely */
15956 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15957}
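
/*
 * Merge example for the helper above: when both the instruction and the
 * deferred commit return EM scheduling codes, the numerically smaller of the
 * two is kept (EM treats lower-valued scheduling codes as the more important
 * ones), so the caller acts on the stricter request.  Anything outside that
 * range falls through to the slow merge helper further up.
 */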
15958
15959
15960/**
15961 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15962 *
15963 * @returns Merge between @a rcStrict and what the commit operation returned.
15964 * @param pVM The cross context VM structure.
15965 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15966 * @param rcStrict The status code returned by ring-0 or raw-mode.
15967 */
15968VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15969{
15970 /*
15971 * Reset the pending commit.
15972 */
15973 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15974 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15975 ("%#x %#x %#x\n",
15976 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15977 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15978
15979 /*
15980 * Commit the pending bounce buffers (usually just one).
15981 */
15982 unsigned cBufs = 0;
15983 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15984 while (iMemMap-- > 0)
15985 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15986 {
15987 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15988 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15989 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15990
15991 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15992 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15993 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15994
15995 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15996 {
15997 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15998 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15999 pbBuf,
16000 cbFirst,
16001 PGMACCESSORIGIN_IEM);
16002 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16003 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16004 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16005 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16006 }
16007
16008 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16009 {
16010 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16011 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16012 pbBuf + cbFirst,
16013 cbSecond,
16014 PGMACCESSORIGIN_IEM);
16015 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16016 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16017 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16018 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16019 }
16020 cBufs++;
16021 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16022 }
16023
16024 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16025 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16026 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16027 pVCpu->iem.s.cActiveMappings = 0;
16028 return rcStrict;
16029}
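
/*
 * A minimal caller sketch (illustrative only, not an actual VBox function):
 * a ring-3 force-flag handler draining pending IEM bounce-buffer commits
 * after returning from ring-0/raw-mode execution.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC SampleDrainIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif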
16030
16031#endif /* IN_RING3 */
16032