VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@77847

Last change on this file since 77847 was 77717, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Added IEMExecVmxVmexitNmi. Might need to eventually do a more generic one that covers hardware exceptions as well as software ints. For now this will do.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 647.5 KB
1/* $Id: IEMAll.cpp 77717 2019-03-15 09:21:42Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, it is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
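
/*
 * Editorial illustration (not part of the original file): how the logging
 * levels listed above are typically exercised once LOG_GROUP is set to
 * LOG_GROUP_IEM further down.  The format strings and variables are made up
 * for the example; only the macro/level pairing matters here.
 *
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));                    // level 1: exceptions, major events
 *      LogFlow(("IEMExecOne: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));     // flow: enter/exit info
 *      Log3(("uCpl=%u enmCpuMode=%d\n", pVCpu->iem.s.uCpl, pVCpu->iem.s.enmCpuMode)); // level 3: detailed state
 *      Log4(("decode - %04x:%08RX64 mov eax, ebx\n", uCs, uRip));          // level 4: mnemonics w/ EIP
 *      Log8(("IEM WR %RGv LB %#x\n", GCPtrDst, cbDst));                    // level 8: memory writes
 *      Log9(("IEM RD %RGv LB %#x\n", GCPtrSrc, cbSrc));                    // level 9: memory reads
 */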
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
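/*
 * Editorial sketch (not part of the original file; the function name is
 * illustrative and IEM's real classification logic lives further down): how
 * the classes above map onto x86 exception vectors for the architectural
 * double-fault rules.
 */
#if 0 /* illustration only */
DECLINLINE(IEMXCPTCLASS) iemExampleXcptGetClass(uint8_t uVector)
{
    switch (uVector)
    {
        case X86_XCPT_DE:   /* #DE - divide error */
        case X86_XCPT_TS:   /* #TS - invalid TSS */
        case X86_XCPT_NP:   /* #NP - segment not present */
        case X86_XCPT_SS:   /* #SS - stack-segment fault */
        case X86_XCPT_GP:   /* #GP - general protection */
            return IEMXCPTCLASS_CONTRIBUTORY;
        case X86_XCPT_PF:   /* #PF - page fault */
            return IEMXCPTCLASS_PAGE_FAULT;
        case X86_XCPT_DF:   /* #DF - double fault */
            return IEMXCPTCLASS_DOUBLE_FAULT;
        default:            /* everything else is benign w.r.t. double faults */
            return IEMXCPTCLASS_BENIGN;
    }
}
#endif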
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
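
/*
 * Editorial sketch (hypothetical helper names, not the actual IEM API): what
 * the two status-handling schemes look like from a caller's point of view.
 * Without IEM_WITH_SETJMP every fetch returns a VBOXSTRICTRC that has to be
 * checked and propagated; with it, failures longjmp straight back to a setjmp
 * frame set up by the top-level executor, so the happy path carries no checks.
 *
 *  #ifndef IEM_WITH_SETJMP
 *      uint8_t      bOpcode;
 *      VBOXSTRICTRC rcStrict = iemExampleFetchU8(pVCpu, &bOpcode);    // returns a status code
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                                           // caller propagates manually
 *  #else
 *      uint8_t const bOpcode = iemExampleFetchU8Jmp(pVCpu);           // longjmps on failure
 *  #endif
 */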
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
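
/*
 * Editorial usage sketch: the macro above supplies the 'default:' label itself,
 * so it is dropped straight into a switch that is expected to cover all values
 * (the worker names are illustrative only):
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: return iemExampleWorker16(pVCpu);
 *          case IEMMODE_32BIT: return iemExampleWorker32(pVCpu);
 *          case IEMMODE_64BIT: return iemExampleWorker64(pVCpu);
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();   // asserts, returns VERR_IPE_NOT_REACHED_DEFAULT_CASE
 *      }
 */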
247
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
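
/*
 * Editorial sketch (hypothetical handler names, not real opcode table entries):
 * how a decoder function is defined with the FNIEMOP_DEF* macros and invoked
 * via FNIEMOP_CALL*, which hide the calling convention, nothrow attributes and
 * the implicit pVCpu argument chosen above.
 */
#if 0 /* illustration only */
FNIEMOP_DEF_1(iemOpExample_worker, uint8_t, bRm)
{
    RT_NOREF(bRm);
    /* A real handler would decode the operands and execute the instruction here. */
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOpExample_dispatch)
{
    /* A real decoder would fetch the ModR/M byte from the opcode stream first. */
    return FNIEMOP_CALL_1(iemOpExample_worker, 0 /*bRm*/);
}
#endif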
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not executing 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles the SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
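
/*
 * Editorial sketch: how the group 1 decoders (opcodes 0x80..0x83) use the
 * table above.  The ModR/M reg field selects the operation (ADD..CMP) and the
 * effective operand size plus any LOCK prefix select the worker within the
 * chosen entry.  Variable names are illustrative only:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];    // reg field picks the row above
 *      ...
 *      pImpl->pfnNormalU32(pu32Dst, u32Src, pEFlags);              // or pfnLockedU32 when LOCK prefixed
 */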
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmi(PVMCPU pVCpu);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitNmiWindow(PVMCPU pVCpu);
991IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
993IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
994IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
995IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
996#endif
997
998#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
999IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
1000IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1001#endif
1002
1003
1004/**
1005 * Sets the pass up status.
1006 *
1007 * @returns VINF_SUCCESS.
1008 * @param pVCpu The cross context virtual CPU structure of the
1009 * calling thread.
1010 * @param rcPassUp The pass up status. Must be informational.
1011 * VINF_SUCCESS is not allowed.
1012 */
1013IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1014{
1015 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1016
1017 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1018 if (rcOldPassUp == VINF_SUCCESS)
1019 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1020 /* If both are EM scheduling codes, use EM priority rules. */
1021 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1022 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1023 {
1024 if (rcPassUp < rcOldPassUp)
1025 {
1026 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1028 }
1029 else
1030 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1031 }
1032 /* Override EM scheduling with specific status code. */
1033 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1034 {
1035 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1036 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1037 }
1038 /* Don't override specific status code, first come first served. */
1039 else
1040 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1041 return VINF_SUCCESS;
1042}
1043
1044
1045/**
1046 * Calculates the CPU mode.
1047 *
1048 * This is mainly for updating IEMCPU::enmCpuMode.
1049 *
1050 * @returns CPU mode.
1051 * @param pVCpu The cross context virtual CPU structure of the
1052 * calling thread.
1053 */
1054DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1055{
1056 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1057 return IEMMODE_64BIT;
1058 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1059 return IEMMODE_32BIT;
1060 return IEMMODE_16BIT;
1061}
1062
1063
1064/**
1065 * Initializes the execution state.
1066 *
1067 * @param pVCpu The cross context virtual CPU structure of the
1068 * calling thread.
1069 * @param fBypassHandlers Whether to bypass access handlers.
1070 *
1071 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1072 * side-effects in strict builds.
1073 */
1074DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1075{
1076 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1077 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1078
1079#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1086 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1088#endif
1089
1090#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1091 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1092#endif
1093 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1094 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1095#ifdef VBOX_STRICT
1096 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1097 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1098 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1099 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1100 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1101 pVCpu->iem.s.uRexReg = 127;
1102 pVCpu->iem.s.uRexB = 127;
1103 pVCpu->iem.s.offModRm = 127;
1104 pVCpu->iem.s.uRexIndex = 127;
1105 pVCpu->iem.s.iEffSeg = 127;
1106 pVCpu->iem.s.idxPrefix = 127;
1107 pVCpu->iem.s.uVex3rdReg = 127;
1108 pVCpu->iem.s.uVexLength = 127;
1109 pVCpu->iem.s.fEvexStuff = 127;
1110 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1111# ifdef IEM_WITH_CODE_TLB
1112 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1113 pVCpu->iem.s.pbInstrBuf = NULL;
1114 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1115 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1116 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1117 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1118# else
1119 pVCpu->iem.s.offOpcode = 127;
1120 pVCpu->iem.s.cbOpcode = 127;
1121# endif
1122#endif
1123
1124 pVCpu->iem.s.cActiveMappings = 0;
1125 pVCpu->iem.s.iNextMapping = 0;
1126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1127 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1129 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1130 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1131 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1132 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1133 if (!pVCpu->iem.s.fInPatchCode)
1134 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1135#endif
1136}
1137
1138#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1139/**
1140 * Performs a minimal reinitialization of the execution state.
1141 *
1142 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1143 * 'world-switch' type operations on the CPU. Currently only nested
1144 * hardware-virtualization uses it.
1145 *
1146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1147 */
1148IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1149{
1150 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1151 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1152
1153 pVCpu->iem.s.uCpl = uCpl;
1154 pVCpu->iem.s.enmCpuMode = enmMode;
1155 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1156 pVCpu->iem.s.enmEffAddrMode = enmMode;
1157 if (enmMode != IEMMODE_64BIT)
1158 {
1159 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1160 pVCpu->iem.s.enmEffOpSize = enmMode;
1161 }
1162 else
1163 {
1164 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1165 pVCpu->iem.s.enmEffOpSize = enmMode;
1166 }
1167 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1168#ifndef IEM_WITH_CODE_TLB
1169 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1170 pVCpu->iem.s.offOpcode = 0;
1171 pVCpu->iem.s.cbOpcode = 0;
1172#endif
1173 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1174}
1175#endif
1176
1177/**
1178 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1179 *
1180 * @param pVCpu The cross context virtual CPU structure of the
1181 * calling thread.
1182 */
1183DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1184{
1185 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1186#ifdef VBOX_STRICT
1187# ifdef IEM_WITH_CODE_TLB
1188 NOREF(pVCpu);
1189# else
1190 pVCpu->iem.s.cbOpcode = 0;
1191# endif
1192#else
1193 NOREF(pVCpu);
1194#endif
1195}
1196
1197
1198/**
1199 * Initializes the decoder state.
1200 *
1201 * iemReInitDecoder is mostly a copy of this function.
1202 *
1203 * @param pVCpu The cross context virtual CPU structure of the
1204 * calling thread.
1205 * @param fBypassHandlers Whether to bypass access handlers.
1206 */
1207DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1208{
1209 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1210 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1211
1212#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1221#endif
1222
1223#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1224 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1225#endif
1226 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1227 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1228 pVCpu->iem.s.enmCpuMode = enmMode;
1229 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1230 pVCpu->iem.s.enmEffAddrMode = enmMode;
1231 if (enmMode != IEMMODE_64BIT)
1232 {
1233 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1234 pVCpu->iem.s.enmEffOpSize = enmMode;
1235 }
1236 else
1237 {
1238 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1239 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1240 }
1241 pVCpu->iem.s.fPrefixes = 0;
1242 pVCpu->iem.s.uRexReg = 0;
1243 pVCpu->iem.s.uRexB = 0;
1244 pVCpu->iem.s.uRexIndex = 0;
1245 pVCpu->iem.s.idxPrefix = 0;
1246 pVCpu->iem.s.uVex3rdReg = 0;
1247 pVCpu->iem.s.uVexLength = 0;
1248 pVCpu->iem.s.fEvexStuff = 0;
1249 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1250#ifdef IEM_WITH_CODE_TLB
1251 pVCpu->iem.s.pbInstrBuf = NULL;
1252 pVCpu->iem.s.offInstrNextByte = 0;
1253 pVCpu->iem.s.offCurInstrStart = 0;
1254# ifdef VBOX_STRICT
1255 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1256 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1257 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1258# endif
1259#else
1260 pVCpu->iem.s.offOpcode = 0;
1261 pVCpu->iem.s.cbOpcode = 0;
1262#endif
1263 pVCpu->iem.s.offModRm = 0;
1264 pVCpu->iem.s.cActiveMappings = 0;
1265 pVCpu->iem.s.iNextMapping = 0;
1266 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1267 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1268#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1269 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1270 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1271 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1272 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1273 if (!pVCpu->iem.s.fInPatchCode)
1274 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1275#endif
1276
1277#ifdef DBGFTRACE_ENABLED
1278 switch (enmMode)
1279 {
1280 case IEMMODE_64BIT:
1281 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1282 break;
1283 case IEMMODE_32BIT:
1284 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1285 break;
1286 case IEMMODE_16BIT:
1287 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1288 break;
1289 }
1290#endif
1291}
1292
1293
1294/**
1295 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1296 *
1297 * This is mostly a copy of iemInitDecoder.
1298 *
1299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1300 */
1301DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1302{
1303 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1304
1305#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1312 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1313 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1314#endif
1315
1316 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1317 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1318 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1319 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1320 pVCpu->iem.s.enmEffAddrMode = enmMode;
1321 if (enmMode != IEMMODE_64BIT)
1322 {
1323 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1324 pVCpu->iem.s.enmEffOpSize = enmMode;
1325 }
1326 else
1327 {
1328 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1329 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1330 }
1331 pVCpu->iem.s.fPrefixes = 0;
1332 pVCpu->iem.s.uRexReg = 0;
1333 pVCpu->iem.s.uRexB = 0;
1334 pVCpu->iem.s.uRexIndex = 0;
1335 pVCpu->iem.s.idxPrefix = 0;
1336 pVCpu->iem.s.uVex3rdReg = 0;
1337 pVCpu->iem.s.uVexLength = 0;
1338 pVCpu->iem.s.fEvexStuff = 0;
1339 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1340#ifdef IEM_WITH_CODE_TLB
1341 if (pVCpu->iem.s.pbInstrBuf)
1342 {
1343 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1344 - pVCpu->iem.s.uInstrBufPc;
1345 if (off < pVCpu->iem.s.cbInstrBufTotal)
1346 {
1347 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1348 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1349 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1350 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1351 else
1352 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1353 }
1354 else
1355 {
1356 pVCpu->iem.s.pbInstrBuf = NULL;
1357 pVCpu->iem.s.offInstrNextByte = 0;
1358 pVCpu->iem.s.offCurInstrStart = 0;
1359 pVCpu->iem.s.cbInstrBuf = 0;
1360 pVCpu->iem.s.cbInstrBufTotal = 0;
1361 }
1362 }
1363 else
1364 {
1365 pVCpu->iem.s.offInstrNextByte = 0;
1366 pVCpu->iem.s.offCurInstrStart = 0;
1367 pVCpu->iem.s.cbInstrBuf = 0;
1368 pVCpu->iem.s.cbInstrBufTotal = 0;
1369 }
1370#else
1371 pVCpu->iem.s.cbOpcode = 0;
1372 pVCpu->iem.s.offOpcode = 0;
1373#endif
1374 pVCpu->iem.s.offModRm = 0;
1375 Assert(pVCpu->iem.s.cActiveMappings == 0);
1376 pVCpu->iem.s.iNextMapping = 0;
1377 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1378 Assert(pVCpu->iem.s.fBypassHandlers == false);
1379#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1380 if (!pVCpu->iem.s.fInPatchCode)
1381 { /* likely */ }
1382 else
1383 {
1384 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1385 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1386 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1387 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1388 if (!pVCpu->iem.s.fInPatchCode)
1389 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1390 }
1391#endif
1392
1393#ifdef DBGFTRACE_ENABLED
1394 switch (enmMode)
1395 {
1396 case IEMMODE_64BIT:
1397 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1398 break;
1399 case IEMMODE_32BIT:
1400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1401 break;
1402 case IEMMODE_16BIT:
1403 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1404 break;
1405 }
1406#endif
1407}
1408
1409
1410
1411/**
1412 * Prefetches opcodes the first time execution is started.
1413 *
1414 * @returns Strict VBox status code.
1415 * @param pVCpu The cross context virtual CPU structure of the
1416 * calling thread.
1417 * @param fBypassHandlers Whether to bypass access handlers.
1418 */
1419IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1420{
1421 iemInitDecoder(pVCpu, fBypassHandlers);
1422
1423#ifdef IEM_WITH_CODE_TLB
1424 /** @todo Do ITLB lookup here. */
1425
1426#else /* !IEM_WITH_CODE_TLB */
1427
1428 /*
1429 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1430 *
1431 * First translate CS:rIP to a physical address.
1432 */
1433 uint32_t cbToTryRead;
1434 RTGCPTR GCPtrPC;
1435 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1436 {
1437 cbToTryRead = PAGE_SIZE;
1438 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1439 if (IEM_IS_CANONICAL(GCPtrPC))
1440 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1441 else
1442 return iemRaiseGeneralProtectionFault0(pVCpu);
1443 }
1444 else
1445 {
1446 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1447 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1448 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1449 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1450 else
1451 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1452 if (cbToTryRead) { /* likely */ }
1453 else /* overflowed */
1454 {
1455 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1456 cbToTryRead = UINT32_MAX;
1457 }
1458 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1459 Assert(GCPtrPC <= UINT32_MAX);
1460 }
1461
1462# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1463 /* Allow interpretation of patch manager code blocks since they can for
1464 instance throw #PFs for perfectly good reasons. */
1465 if (pVCpu->iem.s.fInPatchCode)
1466 {
1467 size_t cbRead = 0;
1468 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1469 AssertRCReturn(rc, rc);
1470 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1471 return VINF_SUCCESS;
1472 }
1473# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1474
1475 RTGCPHYS GCPhys;
1476 uint64_t fFlags;
1477 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1478 if (RT_SUCCESS(rc)) { /* probable */ }
1479 else
1480 {
1481 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1482 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1483 }
1484 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1485 else
1486 {
1487 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1488 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1489 }
1490 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1491 else
1492 {
1493 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1494 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1495 }
1496 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1497 /** @todo Check reserved bits and such stuff. PGM is better at doing
1498 * that, so do it when implementing the guest virtual address
1499 * TLB... */
1500
1501 /*
1502 * Read the bytes at this address.
1503 */
1504 PVM pVM = pVCpu->CTX_SUFF(pVM);
1505# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1506 size_t cbActual;
1507 if ( PATMIsEnabled(pVM)
1508 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1509 {
1510 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1511 Assert(cbActual > 0);
1512 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1513 }
1514 else
1515# endif
1516 {
1517 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1518 if (cbToTryRead > cbLeftOnPage)
1519 cbToTryRead = cbLeftOnPage;
1520 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1521 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1522
1523 if (!pVCpu->iem.s.fBypassHandlers)
1524 {
1525 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1526 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1527 { /* likely */ }
1528 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1529 {
1530 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1531                      GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1532 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1533 }
1534 else
1535 {
1536 Log((RT_SUCCESS(rcStrict)
1537 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1538 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1539                      GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1540 return rcStrict;
1541 }
1542 }
1543 else
1544 {
1545 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1546 if (RT_SUCCESS(rc))
1547 { /* likely */ }
1548 else
1549 {
1550 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1551                      GCPtrPC, GCPhys, cbToTryRead, rc));
1552 return rc;
1553 }
1554 }
1555 pVCpu->iem.s.cbOpcode = cbToTryRead;
1556 }
1557#endif /* !IEM_WITH_CODE_TLB */
1558 return VINF_SUCCESS;
1559}
1560
1561
1562/**
1563 * Invalidates the IEM TLBs.
1564 *
1565 * This is called internally as well as by PGM when moving GC mappings.
1566 *
1568 * @param pVCpu The cross context virtual CPU structure of the calling
1569 * thread.
1570 * @param fVmm Set when PGM calls us with a remapping.
1571 */
1572VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1573{
1574#ifdef IEM_WITH_CODE_TLB
1575 pVCpu->iem.s.cbInstrBufTotal = 0;
1576 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1577 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1578 { /* very likely */ }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1582 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1583 while (i-- > 0)
1584 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1585 }
1586#endif
1587
1588#ifdef IEM_WITH_DATA_TLB
1589 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1590 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1591 { /* very likely */ }
1592 else
1593 {
1594 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1595 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1596 while (i-- > 0)
1597 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1598 }
1599#endif
1600 NOREF(pVCpu); NOREF(fVmm);
1601}
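
/*
 * A rough sketch of the revision trick used above (pseudo-code, simplified;
 * see the IEMTLBENTRY and related definitions for the exact layout): every
 * valid entry has the current revision OR'ed into its uTag, so bumping
 * uTlbRevision instantly invalidates all 256 entries without touching them.
 * Only when the revision counter wraps back to zero do we take the slow
 * branch that scrubs each uTag by hand.
 *
 *      uTag   = (GCPtrPage >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *      fValid = (pTlb->aEntries[(uint8_t)uTag].uTag == uTag);
 */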
1602
1603
1604/**
1605 * Invalidates a page in the TLBs.
1606 *
1607 * @param pVCpu The cross context virtual CPU structure of the calling
1608 * thread.
1609 * @param   GCPtr       The address of the page to invalidate.
1610 */
1611VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1612{
1613#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1614 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1615 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1616 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1617 uintptr_t idx = (uint8_t)GCPtr;
1618
1619# ifdef IEM_WITH_CODE_TLB
1620 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1621 {
1622 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1623 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1624 pVCpu->iem.s.cbInstrBufTotal = 0;
1625 }
1626# endif
1627
1628# ifdef IEM_WITH_DATA_TLB
1629 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1630 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1631# endif
1632#else
1633 NOREF(pVCpu); NOREF(GCPtr);
1634#endif
1635}
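
/*
 * Worked example of the lookup above (illustrative only): for
 * GCPtr = 0x00007fff12345678 the shift gives page number 0x7fff12345, the
 * low byte 0x45 selects the entry, and that entry only counts as a hit when
 * its uTag equals 0x7fff12345 OR'ed with the TLB's current revision.
 */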
1636
1637
1638/**
1639 * Invalidates the host physical aspects of the IEM TLBs.
1640 *
1641 * This is called internally as well as by PGM when moving GC mappings.
1642 *
1643 * @param pVCpu The cross context virtual CPU structure of the calling
1644 * thread.
1645 */
1646VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1647{
1648#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1649    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1650
1651# ifdef IEM_WITH_CODE_TLB
1652 pVCpu->iem.s.cbInstrBufTotal = 0;
1653# endif
1654 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1655 if (uTlbPhysRev != 0)
1656 {
1657 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1658 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1659 }
1660 else
1661 {
1662 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1663 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1664
1665 unsigned i;
1666# ifdef IEM_WITH_CODE_TLB
1667 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1668 while (i-- > 0)
1669 {
1670 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1671 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1672 }
1673# endif
1674# ifdef IEM_WITH_DATA_TLB
1675 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1676 while (i-- > 0)
1677 {
1678 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1679 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1680 }
1681# endif
1682 }
1683#else
1684 NOREF(pVCpu);
1685#endif
1686}
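
/*
 * Note on the physical revision (informal): entries keep the IEMTLBE_F_PHYS_REV
 * bits inside fFlagsAndPhysRev, so bumping uTlbPhysRev above lazily invalidates
 * the cached pbMappingR3 pointers and the IEMTLBE_F_PG_NO_READ/NO_WRITE bits;
 * they get re-resolved via PGMPhysIemGCPhys2PtrNoLock once the stored revision
 * no longer matches (see iemOpcodeFetchBytesJmp below).
 */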
1687
1688
1689/**
1690 * Invalidates the host physical aspects of the IEM TLBs.
1691 *
1692 * This is called internally as well as by PGM when moving GC mappings.
1693 *
1694 * @param pVM The cross context VM structure.
1695 *
1696 * @remarks Caller holds the PGM lock.
1697 */
1698VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1699{
1700 RT_NOREF_PV(pVM);
1701}
1702
1703#ifdef IEM_WITH_CODE_TLB
1704
1705/**
1706 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1707 * longjmp'ing on failure.
1708 *
1709 * We end up here for a number of reasons:
1710 * - pbInstrBuf isn't yet initialized.
1711 *      - Advancing beyond the buffer boundary (e.g. cross page).
1712 * - Advancing beyond the CS segment limit.
1713 * - Fetching from non-mappable page (e.g. MMIO).
1714 *
1715 * @param pVCpu The cross context virtual CPU structure of the
1716 * calling thread.
1717 * @param pvDst Where to return the bytes.
1718 * @param cbDst Number of bytes to read.
1719 *
1720 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1721 */
1722IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1723{
1724#ifdef IN_RING3
1725 for (;;)
1726 {
1727 Assert(cbDst <= 8);
1728 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1729
1730 /*
1731 * We might have a partial buffer match, deal with that first to make the
1732 * rest simpler. This is the first part of the cross page/buffer case.
1733 */
1734 if (pVCpu->iem.s.pbInstrBuf != NULL)
1735 {
1736 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1737 {
1738 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1739 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1740 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1741
1742 cbDst -= cbCopy;
1743 pvDst = (uint8_t *)pvDst + cbCopy;
1744 offBuf += cbCopy;
1745                pVCpu->iem.s.offInstrNextByte += cbCopy;
1746 }
1747 }
1748
1749 /*
1750 * Check segment limit, figuring how much we're allowed to access at this point.
1751 *
1752 * We will fault immediately if RIP is past the segment limit / in non-canonical
1753 * territory. If we do continue, there are one or more bytes to read before we
1754 * end up in trouble and we need to do that first before faulting.
1755 */
1756 RTGCPTR GCPtrFirst;
1757 uint32_t cbMaxRead;
1758 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1759 {
1760 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1761 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1762 { /* likely */ }
1763 else
1764 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1765 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1766 }
1767 else
1768 {
1769 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1770 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1771 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1772 { /* likely */ }
1773 else
1774 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1775 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1776 if (cbMaxRead != 0)
1777 { /* likely */ }
1778 else
1779 {
1780 /* Overflowed because address is 0 and limit is max. */
1781 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1782 cbMaxRead = X86_PAGE_SIZE;
1783 }
1784 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1785 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1786 if (cbMaxRead2 < cbMaxRead)
1787 cbMaxRead = cbMaxRead2;
1788 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1789 }
1790
1791 /*
1792 * Get the TLB entry for this piece of code.
1793 */
1794 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1795 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1796 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1797 if (pTlbe->uTag == uTag)
1798 {
1799 /* likely when executing lots of code, otherwise unlikely */
1800# ifdef VBOX_WITH_STATISTICS
1801 pVCpu->iem.s.CodeTlb.cTlbHits++;
1802# endif
1803 }
1804 else
1805 {
1806 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1807# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1808 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1809 {
1810 pTlbe->uTag = uTag;
1811 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1812                                      | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1813 pTlbe->GCPhys = NIL_RTGCPHYS;
1814 pTlbe->pbMappingR3 = NULL;
1815 }
1816 else
1817# endif
1818 {
1819 RTGCPHYS GCPhys;
1820 uint64_t fFlags;
1821 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1822 if (RT_FAILURE(rc))
1823 {
1824 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1825 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1826 }
1827
1828 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1829 pTlbe->uTag = uTag;
1830 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1831 pTlbe->GCPhys = GCPhys;
1832 pTlbe->pbMappingR3 = NULL;
1833 }
1834 }
1835
1836 /*
1837 * Check TLB page table level access flags.
1838 */
1839 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1840 {
1841 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1842 {
1843 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1844 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1845 }
1846 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1847 {
1848 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1849 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1850 }
1851 }
1852
1853# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1854 /*
1855 * Allow interpretation of patch manager code blocks since they can for
1856 * instance throw #PFs for perfectly good reasons.
1857 */
1858 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1859        { /* not unlikely */ }
1860 else
1861 {
1862            /** @todo Could optimize this a little in ring-3 if we liked. */
1863 size_t cbRead = 0;
1864 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1865 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1866 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1867 return;
1868 }
1869# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1870
1871 /*
1872 * Look up the physical page info if necessary.
1873 */
1874 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 { /* not necessary */ }
1876 else
1877 {
1878 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1879 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1880 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1881 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1882 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1883 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1884 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1885 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1886 }
1887
1888# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1889 /*
1890     * Try to do a direct read using the pbMappingR3 pointer.
1891 */
1892 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1893 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1894 {
1895 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1896 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1897 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1898 {
1899 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1900 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1901 }
1902 else
1903 {
1904 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1905 Assert(cbInstr < cbMaxRead);
1906 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1907 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1908 }
1909 if (cbDst <= cbMaxRead)
1910 {
1911 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1912 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1913 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1914 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1915 return;
1916 }
1917 pVCpu->iem.s.pbInstrBuf = NULL;
1918
1919 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1920 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1921 }
1922 else
1923# endif
1924#if 0
1925 /*
1926     * If there is no special read handling, we can read a bit more and
1927 * put it in the prefetch buffer.
1928 */
1929 if ( cbDst < cbMaxRead
1930 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1931 {
1932 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1933 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1934 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1935 { /* likely */ }
1936 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1937 {
1938 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1939 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1940 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1941                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1942 }
1943 else
1944 {
1945 Log((RT_SUCCESS(rcStrict)
1946 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1947 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1948 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1949 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1950 }
1951 }
1952 /*
1953 * Special read handling, so only read exactly what's needed.
1954 * This is a highly unlikely scenario.
1955 */
1956 else
1957#endif
1958 {
1959 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1960 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1961 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1962 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1963 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1964 { /* likely */ }
1965 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1966 {
1967 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1968                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1969 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1970 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1971 }
1972 else
1973 {
1974 Log((RT_SUCCESS(rcStrict)
1975 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1976 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1977                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1978 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1979 }
1980 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1981 if (cbToRead == cbDst)
1982 return;
1983 }
1984
1985 /*
1986 * More to read, loop.
1987 */
1988 cbDst -= cbMaxRead;
1989 pvDst = (uint8_t *)pvDst + cbMaxRead;
1990 }
1991#else
1992 RT_NOREF(pvDst, cbDst);
1993 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1994#endif
1995}
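
/*
 * Informal walk-through of the cross page case handled above (numbers made
 * up): say pbInstrBuf covers the current code page, two buffered bytes remain
 * and a 4 byte fetch (cbDst = 4) arrives.  The partial match block copies the
 * two leftover bytes, the canonical/limit checks then run for the following
 * page, the code TLB supplies (or is filled with) that page's translation,
 * and the last two bytes are read either directly through pbMappingR3 or, for
 * pages without a usable ring-3 mapping (e.g. MMIO), via PGMPhysRead.
 */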
1996
1997#else
1998
1999/**
2000 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2001 * exception if it fails.
2002 *
2003 * @returns Strict VBox status code.
2004 * @param pVCpu The cross context virtual CPU structure of the
2005 * calling thread.
2006 * @param   cbMin               The minimum number of bytes relative to offOpcode
2007 * that must be read.
2008 */
2009IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2010{
2011 /*
2012 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2013 *
2014 * First translate CS:rIP to a physical address.
2015 */
2016 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2017 uint32_t cbToTryRead;
2018 RTGCPTR GCPtrNext;
2019 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2020 {
2021 cbToTryRead = PAGE_SIZE;
2022 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2023 if (!IEM_IS_CANONICAL(GCPtrNext))
2024 return iemRaiseGeneralProtectionFault0(pVCpu);
2025 }
2026 else
2027 {
2028 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2029 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2030 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2031 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2032 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2033 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2034 if (!cbToTryRead) /* overflowed */
2035 {
2036 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2037 cbToTryRead = UINT32_MAX;
2038 /** @todo check out wrapping around the code segment. */
2039 }
2040 if (cbToTryRead < cbMin - cbLeft)
2041 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2042 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2043 }
2044
2045 /* Only read up to the end of the page, and make sure we don't read more
2046 than the opcode buffer can hold. */
2047 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2048 if (cbToTryRead > cbLeftOnPage)
2049 cbToTryRead = cbLeftOnPage;
2050 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2051 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2052/** @todo r=bird: Convert assertion into undefined opcode exception? */
2053 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2054
2055# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2056 /* Allow interpretation of patch manager code blocks since they can for
2057 instance throw #PFs for perfectly good reasons. */
2058 if (pVCpu->iem.s.fInPatchCode)
2059 {
2060 size_t cbRead = 0;
2061 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2062 AssertRCReturn(rc, rc);
2063 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2064 return VINF_SUCCESS;
2065 }
2066# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2067
2068 RTGCPHYS GCPhys;
2069 uint64_t fFlags;
2070 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2071 if (RT_FAILURE(rc))
2072 {
2073 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2074 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2075 }
2076 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2077 {
2078 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2079 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2080 }
2081 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2082 {
2083 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2084 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2085 }
2086 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2087 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2088 /** @todo Check reserved bits and such stuff. PGM is better at doing
2089 * that, so do it when implementing the guest virtual address
2090 * TLB... */
2091
2092 /*
2093 * Read the bytes at this address.
2094 *
2095 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2096 * and since PATM should only patch the start of an instruction there
2097 * should be no need to check again here.
2098 */
2099 if (!pVCpu->iem.s.fBypassHandlers)
2100 {
2101 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2102 cbToTryRead, PGMACCESSORIGIN_IEM);
2103 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2104 { /* likely */ }
2105 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2106 {
2107 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2108                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2109 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2110 }
2111 else
2112 {
2113 Log((RT_SUCCESS(rcStrict)
2114 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2115 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2116                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2117 return rcStrict;
2118 }
2119 }
2120 else
2121 {
2122 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2123 if (RT_SUCCESS(rc))
2124 { /* likely */ }
2125 else
2126 {
2127 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2128 return rc;
2129 }
2130 }
2131 pVCpu->iem.s.cbOpcode += cbToTryRead;
2132 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2133
2134 return VINF_SUCCESS;
2135}
2136
2137#endif /* !IEM_WITH_CODE_TLB */
2138#ifndef IEM_WITH_SETJMP
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2142 *
2143 * @returns Strict VBox status code.
2144 * @param pVCpu The cross context virtual CPU structure of the
2145 * calling thread.
2146 * @param pb Where to return the opcode byte.
2147 */
2148DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2149{
2150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2151 if (rcStrict == VINF_SUCCESS)
2152 {
2153 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2154 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2155 pVCpu->iem.s.offOpcode = offOpcode + 1;
2156 }
2157 else
2158 *pb = 0;
2159 return rcStrict;
2160}
2161
2162
2163/**
2164 * Fetches the next opcode byte.
2165 *
2166 * @returns Strict VBox status code.
2167 * @param pVCpu The cross context virtual CPU structure of the
2168 * calling thread.
2169 * @param pu8 Where to return the opcode byte.
2170 */
2171DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2172{
2173 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2174 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2175 {
2176 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2177 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2178 return VINF_SUCCESS;
2179 }
2180 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2181}
2182
2183#else /* IEM_WITH_SETJMP */
2184
2185/**
2186 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2187 *
2188 * @returns The opcode byte.
2189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2190 */
2191DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2192{
2193# ifdef IEM_WITH_CODE_TLB
2194 uint8_t u8;
2195 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2196 return u8;
2197# else
2198 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2199 if (rcStrict == VINF_SUCCESS)
2200 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2201 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2202# endif
2203}
2204
2205
2206/**
2207 * Fetches the next opcode byte, longjmp on error.
2208 *
2209 * @returns The opcode byte.
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 */
2212DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2213{
2214# ifdef IEM_WITH_CODE_TLB
2215 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2216 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2217 if (RT_LIKELY( pbBuf != NULL
2218 && offBuf < pVCpu->iem.s.cbInstrBuf))
2219 {
2220 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2221 return pbBuf[offBuf];
2222 }
2223# else
2224 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2225 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2226 {
2227 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2228 return pVCpu->iem.s.abOpcode[offOpcode];
2229 }
2230# endif
2231 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2232}
2233
2234#endif /* IEM_WITH_SETJMP */
2235
2236/**
2237 * Fetches the next opcode byte, returns automatically on failure.
2238 *
2239 * @param a_pu8 Where to return the opcode byte.
2240 * @remark Implicitly references pVCpu.
2241 */
2242#ifndef IEM_WITH_SETJMP
2243# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2244 do \
2245 { \
2246 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2247 if (rcStrict2 == VINF_SUCCESS) \
2248 { /* likely */ } \
2249 else \
2250 return rcStrict2; \
2251 } while (0)
2252#else
2253# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2254#endif /* IEM_WITH_SETJMP */
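
/*
 * Usage sketch (hypothetical decoder fragment, not taken from this file): the
 * macro hides the two error models so the same decoder source compiles for
 * both builds.  Without IEM_WITH_SETJMP a failed fetch returns the strict
 * status code from the enclosing function; with it, the fetch longjmps instead.
 *
 *      uint8_t bImm;
 *      IEM_OPCODE_GET_NEXT_U8(&bImm);   // returns rcStrict2 or longjmps on failure
 */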
2255
2256
2257#ifndef IEM_WITH_SETJMP
2258/**
2259 * Fetches the next signed byte from the opcode stream.
2260 *
2261 * @returns Strict VBox status code.
2262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2263 * @param pi8 Where to return the signed byte.
2264 */
2265DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2266{
2267 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2268}
2269#endif /* !IEM_WITH_SETJMP */
2270
2271
2272/**
2273 * Fetches the next signed byte from the opcode stream, returning automatically
2274 * on failure.
2275 *
2276 * @param a_pi8 Where to return the signed byte.
2277 * @remark Implicitly references pVCpu.
2278 */
2279#ifndef IEM_WITH_SETJMP
2280# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2281 do \
2282 { \
2283 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2284 if (rcStrict2 != VINF_SUCCESS) \
2285 return rcStrict2; \
2286 } while (0)
2287#else /* IEM_WITH_SETJMP */
2288# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2289
2290#endif /* IEM_WITH_SETJMP */
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param   pu16                Where to return the opcode word.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu16 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 16-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu16 Where to return the unsigned word.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2324
2325 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream and sign extends it to a
2334 * word, returns automatically on failure.
2335 *
2336 * @param a_pu16 Where to return the word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
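
/*
 * For instance, an opcode byte of 0xf0 is fetched as the signed value -16 and
 * stored by IEM_OPCODE_GET_NEXT_S8_SX_U16 as 0xfff0, whereas a plain
 * IEM_OPCODE_GET_NEXT_U8 fetch of the same byte would yield 0x00f0.  The
 * S8_SX_U32/S8_SX_U64 variants below behave the same way for wider targets.
 */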
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu32 Where to return the opcode dword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu32 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 32-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu32 Where to return the unsigned dword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2383
2384 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391/**
2392 * Fetches the next signed byte from the opcode stream and sign extends it to a
2393 * double word, returns automatically on failure.
2394 *
2395 * @param   a_pu32              Where to return the double word.
2396 * @remark Implicitly references pVCpu.
2397 */
2398#ifndef IEM_WITH_SETJMP
2399# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2400 do \
2401 { \
2402 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2403 if (rcStrict2 != VINF_SUCCESS) \
2404 return rcStrict2; \
2405 } while (0)
2406#else
2407# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2408#endif
2409
2410#ifndef IEM_WITH_SETJMP
2411
2412/**
2413 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2414 *
2415 * @returns Strict VBox status code.
2416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2417 * @param pu64 Where to return the opcode qword.
2418 */
2419DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2420{
2421 uint8_t u8;
2422 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2423 if (rcStrict == VINF_SUCCESS)
2424 *pu64 = (int8_t)u8;
2425 return rcStrict;
2426}
2427
2428
2429/**
2430 * Fetches the next signed byte from the opcode stream, extending it to
2431 * unsigned 64-bit.
2432 *
2433 * @returns Strict VBox status code.
2434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2435 * @param pu64 Where to return the unsigned qword.
2436 */
2437DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2438{
2439 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2440 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2441 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2442
2443 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2444 pVCpu->iem.s.offOpcode = offOpcode + 1;
2445 return VINF_SUCCESS;
2446}
2447
2448#endif /* !IEM_WITH_SETJMP */
2449
2450
2451/**
2452 * Fetches the next signed byte from the opcode stream and sign extends it to a
2453 * quad word, returns automatically on failure.
2454 *
2455 * @param   a_pu64              Where to return the quad word.
2456 * @remark Implicitly references pVCpu.
2457 */
2458#ifndef IEM_WITH_SETJMP
2459# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2460 do \
2461 { \
2462 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2463 if (rcStrict2 != VINF_SUCCESS) \
2464 return rcStrict2; \
2465 } while (0)
2466#else
2467# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2468#endif
2469
2470
2471#ifndef IEM_WITH_SETJMP
2472/**
2473 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the
2477 * calling thread.
2478 * @param pu8 Where to return the opcode byte.
2479 */
2480DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2481{
2482 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offModRm = offOpcode;
2484 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2485 {
2486 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2487 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2488 return VINF_SUCCESS;
2489 }
2490 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2491}
2492#else /* IEM_WITH_SETJMP */
2493/**
2494 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset; longjmp on error.
2495 *
2496 * @returns The opcode byte.
2497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2498 */
2499DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2500{
2501# ifdef IEM_WITH_CODE_TLB
2502 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2503 pVCpu->iem.s.offModRm = offBuf;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf < pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2509 return pbBuf[offBuf];
2510 }
2511# else
2512 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offModRm = offOpcode;
2514 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2515 {
2516 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2517 return pVCpu->iem.s.abOpcode[offOpcode];
2518 }
2519# endif
2520 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2521}
2522#endif /* IEM_WITH_SETJMP */
2523
2524/**
2525 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2526 * on failure.
2527 *
2528 * Will note down the position of the ModR/M byte for VT-x exits.
2529 *
2530 * @param a_pbRm Where to return the RM opcode byte.
2531 * @remark Implicitly references pVCpu.
2532 */
2533#ifndef IEM_WITH_SETJMP
2534# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2535 do \
2536 { \
2537 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2538 if (rcStrict2 == VINF_SUCCESS) \
2539 { /* likely */ } \
2540 else \
2541 return rcStrict2; \
2542 } while (0)
2543#else
2544# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2545#endif /* IEM_WITH_SETJMP */
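
/*
 * Usage sketch (hypothetical, not taken from this file): instruction decoders
 * that need the ModR/M position recorded fetch the byte through this macro
 * rather than IEM_OPCODE_GET_NEXT_U8, so that pVCpu->iem.s.offModRm ends up
 * pointing at the ModR/M byte (e.g. for assembling VT-x exit information).
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);    // also records offModRm
 */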
2546
2547
2548#ifndef IEM_WITH_SETJMP
2549
2550/**
2551 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2552 *
2553 * @returns Strict VBox status code.
2554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2555 * @param pu16 Where to return the opcode word.
2556 */
2557DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2558{
2559 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2560 if (rcStrict == VINF_SUCCESS)
2561 {
2562 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2563# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2564 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2565# else
2566 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567# endif
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu16 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu16 Where to return the opcode word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2584{
2585 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2587 {
2588 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2589# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2590 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2591# else
2592 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593# endif
2594 return VINF_SUCCESS;
2595 }
2596 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2597}
2598
2599#else /* IEM_WITH_SETJMP */
2600
2601/**
2602 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2603 *
2604 * @returns The opcode word.
2605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2606 */
2607DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2608{
2609# ifdef IEM_WITH_CODE_TLB
2610 uint16_t u16;
2611 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2612 return u16;
2613# else
2614 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2615 if (rcStrict == VINF_SUCCESS)
2616 {
2617 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2618 pVCpu->iem.s.offOpcode += 2;
2619# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2620 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2621# else
2622 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2623# endif
2624 }
2625 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2626# endif
2627}
2628
2629
2630/**
2631 * Fetches the next opcode word, longjmp on error.
2632 *
2633 * @returns The opcode word.
2634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2635 */
2636DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2637{
2638# ifdef IEM_WITH_CODE_TLB
2639 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2640 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2641 if (RT_LIKELY( pbBuf != NULL
2642 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2643 {
2644 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2645# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2646 return *(uint16_t const *)&pbBuf[offBuf];
2647# else
2648 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2649# endif
2650 }
2651# else
2652 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2654 {
2655 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2656# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2657 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2658# else
2659 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2660# endif
2661 }
2662# endif
2663 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2664}
2665
2666#endif /* IEM_WITH_SETJMP */
2667
2668
2669/**
2670 * Fetches the next opcode word, returns automatically on failure.
2671 *
2672 * @param a_pu16 Where to return the opcode word.
2673 * @remark Implicitly references pVCpu.
2674 */
2675#ifndef IEM_WITH_SETJMP
2676# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2677 do \
2678 { \
2679 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2680 if (rcStrict2 != VINF_SUCCESS) \
2681 return rcStrict2; \
2682 } while (0)
2683#else
2684# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2685#endif
2686
2687#ifndef IEM_WITH_SETJMP
2688
2689/**
2690 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2691 *
2692 * @returns Strict VBox status code.
2693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2694 * @param pu32 Where to return the opcode double word.
2695 */
2696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2697{
2698 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2699 if (rcStrict == VINF_SUCCESS)
2700 {
2701 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2702 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2703 pVCpu->iem.s.offOpcode = offOpcode + 2;
2704 }
2705 else
2706 *pu32 = 0;
2707 return rcStrict;
2708}
2709
2710
2711/**
2712 * Fetches the next opcode word, zero extending it to a double word.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pu32 Where to return the opcode double word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2719{
2720 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2721 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2722 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2723
2724 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2725 pVCpu->iem.s.offOpcode = offOpcode + 2;
2726 return VINF_SUCCESS;
2727}
2728
2729#endif /* !IEM_WITH_SETJMP */
2730
2731
2732/**
2733 * Fetches the next opcode word and zero extends it to a double word, returns
2734 * automatically on failure.
2735 *
2736 * @param a_pu32 Where to return the opcode double word.
2737 * @remark Implicitly references pVCpu.
2738 */
2739#ifndef IEM_WITH_SETJMP
2740# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2741 do \
2742 { \
2743 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2744 if (rcStrict2 != VINF_SUCCESS) \
2745 return rcStrict2; \
2746 } while (0)
2747#else
2748# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2749#endif
2750
2751#ifndef IEM_WITH_SETJMP
2752
2753/**
2754 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2755 *
2756 * @returns Strict VBox status code.
2757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2758 * @param pu64 Where to return the opcode quad word.
2759 */
2760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2761{
2762 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2763 if (rcStrict == VINF_SUCCESS)
2764 {
2765 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2766 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2767 pVCpu->iem.s.offOpcode = offOpcode + 2;
2768 }
2769 else
2770 *pu64 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode word, zero extending it to a quad word.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu64 Where to return the opcode quad word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2783{
2784 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2786 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2787
2788 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2789 pVCpu->iem.s.offOpcode = offOpcode + 2;
2790 return VINF_SUCCESS;
2791}
2792
2793#endif /* !IEM_WITH_SETJMP */
2794
2795/**
2796 * Fetches the next opcode word and zero extends it to a quad word, returns
2797 * automatically on failure.
2798 *
2799 * @param a_pu64 Where to return the opcode quad word.
2800 * @remark Implicitly references pVCpu.
2801 */
2802#ifndef IEM_WITH_SETJMP
2803# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2804 do \
2805 { \
2806 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2807 if (rcStrict2 != VINF_SUCCESS) \
2808 return rcStrict2; \
2809 } while (0)
2810#else
2811# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2812#endif
2813
2814
2815#ifndef IEM_WITH_SETJMP
2816/**
2817 * Fetches the next signed word from the opcode stream.
2818 *
2819 * @returns Strict VBox status code.
2820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2821 * @param pi16 Where to return the signed word.
2822 */
2823DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2824{
2825 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2826}
2827#endif /* !IEM_WITH_SETJMP */
2828
2829
2830/**
2831 * Fetches the next signed word from the opcode stream, returning automatically
2832 * on failure.
2833 *
2834 * @param a_pi16 Where to return the signed word.
2835 * @remark Implicitly references pVCpu.
2836 */
2837#ifndef IEM_WITH_SETJMP
2838# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2839 do \
2840 { \
2841 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2842 if (rcStrict2 != VINF_SUCCESS) \
2843 return rcStrict2; \
2844 } while (0)
2845#else
2846# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2847#endif
2848
2849#ifndef IEM_WITH_SETJMP
2850
2851/**
2852 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2853 *
2854 * @returns Strict VBox status code.
2855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2856 * @param pu32 Where to return the opcode dword.
2857 */
2858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2859{
2860 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2861 if (rcStrict == VINF_SUCCESS)
2862 {
2863 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 pVCpu->iem.s.offOpcode = offOpcode + 4;
2873 }
2874 else
2875 *pu32 = 0;
2876 return rcStrict;
2877}
2878
2879
2880/**
2881 * Fetches the next opcode dword.
2882 *
2883 * @returns Strict VBox status code.
2884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2885 * @param pu32 Where to return the opcode double word.
2886 */
2887DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2888{
2889 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2890 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2891 {
2892 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2893# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2894 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2895# else
2896 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900# endif
2901 return VINF_SUCCESS;
2902 }
2903 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2904}
2905
2906#else  /* IEM_WITH_SETJMP */
2907
2908/**
2909 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2910 *
2911 * @returns The opcode dword.
2912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2913 */
2914DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2915{
2916# ifdef IEM_WITH_CODE_TLB
2917 uint32_t u32;
2918 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2919 return u32;
2920# else
2921 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2922 if (rcStrict == VINF_SUCCESS)
2923 {
2924 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2925 pVCpu->iem.s.offOpcode = offOpcode + 4;
2926# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2927 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2928# else
2929 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2930 pVCpu->iem.s.abOpcode[offOpcode + 1],
2931 pVCpu->iem.s.abOpcode[offOpcode + 2],
2932 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2933# endif
2934 }
2935 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2936# endif
2937}
2938
2939
2940/**
2941 * Fetches the next opcode dword, longjmp on error.
2942 *
2943 * @returns The opcode dword.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 */
2946DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2947{
2948# ifdef IEM_WITH_CODE_TLB
2949 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2950 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2951 if (RT_LIKELY( pbBuf != NULL
2952 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2953 {
2954 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2955# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2956 return *(uint32_t const *)&pbBuf[offBuf];
2957# else
2958 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2959 pbBuf[offBuf + 1],
2960 pbBuf[offBuf + 2],
2961 pbBuf[offBuf + 3]);
2962# endif
2963 }
2964# else
2965 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2966 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2967 {
2968 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2969# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2970 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2971# else
2972 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2973 pVCpu->iem.s.abOpcode[offOpcode + 1],
2974 pVCpu->iem.s.abOpcode[offOpcode + 2],
2975 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2976# endif
2977 }
2978# endif
2979 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2980}
2981
2982#endif /* !IEM_WITH_SETJMP */
2983
2984
2985/**
2986 * Fetches the next opcode dword, returns automatically on failure.
2987 *
2988 * @param a_pu32 Where to return the opcode dword.
2989 * @remark Implicitly references pVCpu.
2990 */
2991#ifndef IEM_WITH_SETJMP
2992# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2993 do \
2994 { \
2995 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2996 if (rcStrict2 != VINF_SUCCESS) \
2997 return rcStrict2; \
2998 } while (0)
2999#else
3000# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3001#endif
3002
3003#ifndef IEM_WITH_SETJMP
3004
3005/**
3006 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3007 *
3008 * @returns Strict VBox status code.
3009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3010 * @param   pu64                Where to return the opcode quad word.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3013{
3014 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3015 if (rcStrict == VINF_SUCCESS)
3016 {
3017 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3018 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3019 pVCpu->iem.s.abOpcode[offOpcode + 1],
3020 pVCpu->iem.s.abOpcode[offOpcode + 2],
3021 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3022 pVCpu->iem.s.offOpcode = offOpcode + 4;
3023 }
3024 else
3025 *pu64 = 0;
3026 return rcStrict;
3027}
3028
3029
3030/**
3031 * Fetches the next opcode dword, zero extending it to a quad word.
3032 *
3033 * @returns Strict VBox status code.
3034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3035 * @param pu64 Where to return the opcode quad word.
3036 */
3037DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3038{
3039 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3040 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3041 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3042
3043 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3044 pVCpu->iem.s.abOpcode[offOpcode + 1],
3045 pVCpu->iem.s.abOpcode[offOpcode + 2],
3046 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode dword and zero extends it to a quad word, returns
3056 * automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
3072
3073
3074#ifndef IEM_WITH_SETJMP
3075/**
3076 * Fetches the next signed double word from the opcode stream.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pi32 Where to return the signed double word.
3081 */
3082DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3083{
3084 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3085}
3086#endif
3087
3088/**
3089 * Fetches the next signed double word from the opcode stream, returning
3090 * automatically on failure.
3091 *
3092 * @param a_pi32 Where to return the signed double word.
3093 * @remark Implicitly references pVCpu.
3094 */
3095#ifndef IEM_WITH_SETJMP
3096# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3097 do \
3098 { \
3099 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3100 if (rcStrict2 != VINF_SUCCESS) \
3101 return rcStrict2; \
3102 } while (0)
3103#else
3104# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3105#endif
3106
3107#ifndef IEM_WITH_SETJMP
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3111 *
3112 * @returns Strict VBox status code.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 * @param pu64 Where to return the opcode qword.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3117{
3118 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3119 if (rcStrict == VINF_SUCCESS)
3120 {
3121 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3122 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3123 pVCpu->iem.s.abOpcode[offOpcode + 1],
3124 pVCpu->iem.s.abOpcode[offOpcode + 2],
3125 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3126 pVCpu->iem.s.offOpcode = offOpcode + 4;
3127 }
3128 else
3129 *pu64 = 0;
3130 return rcStrict;
3131}
3132
3133
3134/**
3135 * Fetches the next opcode dword, sign extending it into a quad word.
3136 *
3137 * @returns Strict VBox status code.
3138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3139 * @param pu64 Where to return the opcode quad word.
3140 */
3141DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3142{
3143 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3144 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3145 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3146
3147 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3151 *pu64 = i32;
3152 pVCpu->iem.s.offOpcode = offOpcode + 4;
3153 return VINF_SUCCESS;
3154}
3155
3156#endif /* !IEM_WITH_SETJMP */
3157
3158
3159/**
3160 * Fetches the next opcode double word and sign extends it to a quad word,
3161 * returns automatically on failure.
3162 *
3163 * @param a_pu64 Where to return the opcode quad word.
3164 * @remark Implicitly references pVCpu.
3165 */
3166#ifndef IEM_WITH_SETJMP
3167# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3168 do \
3169 { \
3170 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3171 if (rcStrict2 != VINF_SUCCESS) \
3172 return rcStrict2; \
3173 } while (0)
3174#else
3175# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3176#endif
3177
3178#ifndef IEM_WITH_SETJMP
3179
3180/**
3181 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3182 *
3183 * @returns Strict VBox status code.
3184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3185 * @param pu64 Where to return the opcode qword.
3186 */
3187DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3188{
3189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3190 if (rcStrict == VINF_SUCCESS)
3191 {
3192 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3193# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3194 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3195# else
3196 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3197 pVCpu->iem.s.abOpcode[offOpcode + 1],
3198 pVCpu->iem.s.abOpcode[offOpcode + 2],
3199 pVCpu->iem.s.abOpcode[offOpcode + 3],
3200 pVCpu->iem.s.abOpcode[offOpcode + 4],
3201 pVCpu->iem.s.abOpcode[offOpcode + 5],
3202 pVCpu->iem.s.abOpcode[offOpcode + 6],
3203 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3204# endif
3205 pVCpu->iem.s.offOpcode = offOpcode + 8;
3206 }
3207 else
3208 *pu64 = 0;
3209 return rcStrict;
3210}
3211
3212
3213/**
3214 * Fetches the next opcode qword.
3215 *
3216 * @returns Strict VBox status code.
3217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3218 * @param pu64 Where to return the opcode qword.
3219 */
3220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3221{
3222 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3223 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3224 {
3225# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3226 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3227# else
3228 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3229 pVCpu->iem.s.abOpcode[offOpcode + 1],
3230 pVCpu->iem.s.abOpcode[offOpcode + 2],
3231 pVCpu->iem.s.abOpcode[offOpcode + 3],
3232 pVCpu->iem.s.abOpcode[offOpcode + 4],
3233 pVCpu->iem.s.abOpcode[offOpcode + 5],
3234 pVCpu->iem.s.abOpcode[offOpcode + 6],
3235 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3236# endif
3237 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3238 return VINF_SUCCESS;
3239 }
3240 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3241}
3242
3243#else /* IEM_WITH_SETJMP */
3244
3245/**
3246 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3247 *
3248 * @returns The opcode qword.
3249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3250 */
3251DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3252{
3253# ifdef IEM_WITH_CODE_TLB
3254 uint64_t u64;
3255 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3256 return u64;
3257# else
3258 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3259 if (rcStrict == VINF_SUCCESS)
3260 {
3261 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3262 pVCpu->iem.s.offOpcode = offOpcode + 8;
3263# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3264 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3265# else
3266 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3267 pVCpu->iem.s.abOpcode[offOpcode + 1],
3268 pVCpu->iem.s.abOpcode[offOpcode + 2],
3269 pVCpu->iem.s.abOpcode[offOpcode + 3],
3270 pVCpu->iem.s.abOpcode[offOpcode + 4],
3271 pVCpu->iem.s.abOpcode[offOpcode + 5],
3272 pVCpu->iem.s.abOpcode[offOpcode + 6],
3273 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3274# endif
3275 }
3276 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3277# endif
3278}
3279
3280
3281/**
3282 * Fetches the next opcode qword, longjmp on error.
3283 *
3284 * @returns The opcode qword.
3285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3286 */
3287DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3288{
3289# ifdef IEM_WITH_CODE_TLB
3290 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3291 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3292 if (RT_LIKELY( pbBuf != NULL
3293 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3294 {
3295 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3296# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3297 return *(uint64_t const *)&pbBuf[offBuf];
3298# else
3299 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3300 pbBuf[offBuf + 1],
3301 pbBuf[offBuf + 2],
3302 pbBuf[offBuf + 3],
3303 pbBuf[offBuf + 4],
3304 pbBuf[offBuf + 5],
3305 pbBuf[offBuf + 6],
3306 pbBuf[offBuf + 7]);
3307# endif
3308 }
3309# else
3310 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3311 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3312 {
3313 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3314# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3315 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3316# else
3317 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3318 pVCpu->iem.s.abOpcode[offOpcode + 1],
3319 pVCpu->iem.s.abOpcode[offOpcode + 2],
3320 pVCpu->iem.s.abOpcode[offOpcode + 3],
3321 pVCpu->iem.s.abOpcode[offOpcode + 4],
3322 pVCpu->iem.s.abOpcode[offOpcode + 5],
3323 pVCpu->iem.s.abOpcode[offOpcode + 6],
3324 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3325# endif
3326 }
3327# endif
3328 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3329}
3330
3331#endif /* IEM_WITH_SETJMP */
3332
3333/**
3334 * Fetches the next opcode quad word, returns automatically on failure.
3335 *
3336 * @param a_pu64 Where to return the opcode quad word.
3337 * @remark Implicitly references pVCpu.
3338 */
3339#ifndef IEM_WITH_SETJMP
3340# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3341 do \
3342 { \
3343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3344 if (rcStrict2 != VINF_SUCCESS) \
3345 return rcStrict2; \
3346 } while (0)
3347#else
3348# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3349#endif
3350
3351
3352/** @name Misc Worker Functions.
3353 * @{
3354 */
3355
3356/**
3357 * Gets the exception class for the specified exception vector.
3358 *
3359 * @returns The class of the specified exception.
3360 * @param uVector The exception vector.
3361 */
3362IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3363{
3364 Assert(uVector <= X86_XCPT_LAST);
3365 switch (uVector)
3366 {
3367 case X86_XCPT_DE:
3368 case X86_XCPT_TS:
3369 case X86_XCPT_NP:
3370 case X86_XCPT_SS:
3371 case X86_XCPT_GP:
3372 case X86_XCPT_SX: /* AMD only */
3373 return IEMXCPTCLASS_CONTRIBUTORY;
3374
3375 case X86_XCPT_PF:
3376 case X86_XCPT_VE: /* Intel only */
3377 return IEMXCPTCLASS_PAGE_FAULT;
3378
3379 case X86_XCPT_DF:
3380 return IEMXCPTCLASS_DOUBLE_FAULT;
3381 }
3382 return IEMXCPTCLASS_BENIGN;
3383}
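
/*
 * Quick reference for how the classes above combine when a second exception is
 * raised while delivering a first event (a summary of what
 * IEMEvaluateRecursiveXcpt below implements; cf. the Intel SDM table on
 * conditions for generating a double fault):
 *
 *      First \ Second | Benign  | Contributory | Page fault
 *      ---------------+---------+--------------+--------------
 *      Benign         | deliver | deliver      | deliver
 *      Contributory   | deliver | #DF          | deliver
 *      Page fault     | deliver | #DF          | #DF
 *      Double fault   | deliver | triple fault | triple fault
 *
 * "deliver" means the second event is raised normally (IEMXCPTRAISE_CURRENT_XCPT).
 * Special cases handled below: #AC raised while delivering #AC hangs the CPU,
 * and NMI / external-interrupt / software-interrupt sources only record extra
 * info flags.
 */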
3384
3385
3386/**
3387 * Evaluates how to handle an exception caused during delivery of another event
3388 * (exception / interrupt).
3389 *
3390 * @returns How to handle the recursive exception.
3391 * @param pVCpu The cross context virtual CPU structure of the
3392 * calling thread.
3393 * @param fPrevFlags The flags of the previous event.
3394 * @param uPrevVector The vector of the previous event.
3395 * @param fCurFlags The flags of the current exception.
3396 * @param uCurVector The vector of the current exception.
3397 * @param pfXcptRaiseInfo Where to store additional information about the
3398 * exception condition. Optional.
3399 */
3400VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3401 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3402{
3403 /*
3404 * Only CPU exceptions can be raised while delivering another event; exceptions generated by software
3405 * interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3406 */
3407 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3408 Assert(pVCpu); RT_NOREF(pVCpu);
3409 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3410
3411 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3412 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3413 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3414 {
3415 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3416 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3417 {
3418 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3419 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3420 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3421 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3422 {
3423 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3424 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3425 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3426 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3427 uCurVector, pVCpu->cpum.GstCtx.cr2));
3428 }
3429 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3430 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3431 {
3432 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3433 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3434 }
3435 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3436 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3437 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3438 {
3439 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3440 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3441 }
3442 }
3443 else
3444 {
3445 if (uPrevVector == X86_XCPT_NMI)
3446 {
3447 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3448 if (uCurVector == X86_XCPT_PF)
3449 {
3450 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3451 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3452 }
3453 }
3454 else if ( uPrevVector == X86_XCPT_AC
3455 && uCurVector == X86_XCPT_AC)
3456 {
3457 enmRaise = IEMXCPTRAISE_CPU_HANG;
3458 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3459 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3460 }
3461 }
3462 }
3463 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3464 {
3465 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3466 if (uCurVector == X86_XCPT_PF)
3467 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3468 }
3469 else
3470 {
3471 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3472 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3473 }
3474
3475 if (pfXcptRaiseInfo)
3476 *pfXcptRaiseInfo = fRaiseInfo;
3477 return enmRaise;
3478}
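
/*
 * Illustrative caller sketch (hypothetical, error handling elided): event
 * delivery code that hits a second exception could consult the evaluator like
 * this and act on the verdict:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
 *                                                          fCurFlags, uCurVector, &fRaiseInfo);
 *     switch (enmRaise)
 *     {
 *         case IEMXCPTRAISE_CURRENT_XCPT: // deliver the second event as-is
 *         case IEMXCPTRAISE_DOUBLE_FAULT: // raise #DF instead
 *         case IEMXCPTRAISE_TRIPLE_FAULT: // shutdown / triple fault processing
 *         case IEMXCPTRAISE_CPU_HANG:     // e.g. recursive #AC
 *         default:
 *             break;
 *     }
 */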
3479
3480
3481/**
3482 * Enters the CPU shutdown state initiated by a triple fault or another
3483 * unrecoverable condition.
3484 *
3485 * @returns Strict VBox status code.
3486 * @param pVCpu The cross context virtual CPU structure of the
3487 * calling thread.
3488 */
3489IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3490{
3491 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3492 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3493
3494 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3495 {
3496 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3497 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3498 }
3499
3500 RT_NOREF(pVCpu);
3501 return VINF_EM_TRIPLE_FAULT;
3502}
3503
3504
3505/**
3506 * Validates a new SS segment.
3507 *
3508 * @returns VBox strict status code.
3509 * @param pVCpu The cross context virtual CPU structure of the
3510 * calling thread.
3511 * @param NewSS The new SS selector.
3512 * @param uCpl The CPL to load the stack for.
3513 * @param pDesc Where to return the descriptor.
3514 */
3515IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3516{
3517 /* Null selectors are not allowed (we're not called for dispatching
3518 interrupts with SS=0 in long mode). */
3519 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3520 {
3521 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3522 return iemRaiseTaskSwitchFault0(pVCpu);
3523 }
3524
3525 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3526 if ((NewSS & X86_SEL_RPL) != uCpl)
3527 {
3528 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3529 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3530 }
3531
3532 /*
3533 * Read the descriptor.
3534 */
3535 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3536 if (rcStrict != VINF_SUCCESS)
3537 return rcStrict;
3538
3539 /*
3540 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3541 */
3542 if (!pDesc->Legacy.Gen.u1DescType)
3543 {
3544 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3545 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3546 }
3547
3548 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3549 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3550 {
3551 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3552 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3553 }
3554 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3555 {
3556 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3557 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3558 }
3559
3560 /* Is it there? */
3561 /** @todo testcase: Is this checked before the canonical / limit check below? */
3562 if (!pDesc->Legacy.Gen.u1Present)
3563 {
3564 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3565 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3566 }
3567
3568 return VINF_SUCCESS;
3569}
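
/*
 * Summary of the checks performed above (derived directly from the code), in order:
 *   1. NULL selector                             -> #TS(0)
 *   2. RPL != uCpl                               -> #TS(NewSS)
 *   3. Descriptor fetch error                    -> status propagated
 *   4. System descriptor (u1DescType clear)      -> #TS(NewSS)
 *   5. Code segment or non-writable data segment -> #TS(NewSS)
 *   6. DPL != uCpl                               -> #TS(NewSS)
 *   7. Segment not present                       -> #NP(NewSS)
 */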
3570
3571
3572/**
3573 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3574 * not.
3575 *
3576 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3577 */
3578#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3579# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3580#else
3581# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3582#endif
3583
3584/**
3585 * Updates the EFLAGS in the correct manner wrt. PATM.
3586 *
3587 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3588 * @param a_fEfl The new EFLAGS.
3589 */
3590#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3591# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3592#else
3593# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3594#endif
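
/*
 * Illustrative read-modify-write of the guest EFLAGS via the accessors above
 * (sketch only; the flag chosen is just an example):
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~X86_EFL_IF;                 // e.g. clear the interrupt flag
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */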
3595
3596
3597/** @} */
3598
3599/** @name Raising Exceptions.
3600 *
3601 * @{
3602 */
3603
3604
3605/**
3606 * Loads the specified stack far pointer from the TSS.
3607 *
3608 * @returns VBox strict status code.
3609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3610 * @param uCpl The CPL to load the stack for.
3611 * @param pSelSS Where to return the new stack segment.
3612 * @param puEsp Where to return the new stack pointer.
3613 */
3614IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3615{
3616 VBOXSTRICTRC rcStrict;
3617 Assert(uCpl < 4);
3618
3619 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3620 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3621 {
3622 /*
3623 * 16-bit TSS (X86TSS16).
3624 */
3625 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3626 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3627 {
3628 uint32_t off = uCpl * 4 + 2;
3629 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3630 {
3631 /** @todo check actual access pattern here. */
3632 uint32_t u32Tmp = 0; /* gcc maybe... */
3633 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3634 if (rcStrict == VINF_SUCCESS)
3635 {
3636 *puEsp = RT_LOWORD(u32Tmp);
3637 *pSelSS = RT_HIWORD(u32Tmp);
3638 return VINF_SUCCESS;
3639 }
3640 }
3641 else
3642 {
3643 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3644 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3645 }
3646 break;
3647 }
3648
3649 /*
3650 * 32-bit TSS (X86TSS32).
3651 */
3652 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3653 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3654 {
3655 uint32_t off = uCpl * 8 + 4;
3656 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3657 {
3658 /** @todo check actual access pattern here. */
3659 uint64_t u64Tmp;
3660 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3661 if (rcStrict == VINF_SUCCESS)
3662 {
3663 *puEsp = u64Tmp & UINT32_MAX;
3664 *pSelSS = (RTSEL)(u64Tmp >> 32);
3665 return VINF_SUCCESS;
3666 }
3667 }
3668 else
3669 {
3670 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3671 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3672 }
3673 break;
3674 }
3675
3676 default:
3677 AssertFailed();
3678 rcStrict = VERR_IEM_IPE_4;
3679 break;
3680 }
3681
3682 *puEsp = 0; /* make gcc happy */
3683 *pSelSS = 0; /* make gcc happy */
3684 return rcStrict;
3685}
3686
3687
3688/**
3689 * Loads the specified stack pointer from the 64-bit TSS.
3690 *
3691 * @returns VBox strict status code.
3692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3693 * @param uCpl The CPL to load the stack for.
3694 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3695 * @param puRsp Where to return the new stack pointer.
3696 */
3697IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3698{
3699 Assert(uCpl < 4);
3700 Assert(uIst < 8);
3701 *puRsp = 0; /* make gcc happy */
3702
3703 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3704 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3705
3706 uint32_t off;
3707 if (uIst)
3708 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3709 else
3710 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3711 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3712 {
3713 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3714 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3715 }
3716
3717 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3718}
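
/*
 * For reference, assuming the standard 64-bit TSS layout (X86TSS64) with rsp0
 * at offset 0x04 and ist1 at offset 0x24, the offsets computed above work out to:
 *     uIst == 0:  off = uCpl * 8 + 0x04        (rsp0, rsp1 or rsp2)
 *     uIst != 0:  off = (uIst - 1) * 8 + 0x24  (ist1 thru ist7)
 */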
3719
3720
3721/**
3722 * Adjust the CPU state according to the exception being raised.
3723 *
3724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3725 * @param u8Vector The exception that has been raised.
3726 */
3727DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3728{
3729 switch (u8Vector)
3730 {
3731 case X86_XCPT_DB:
3732 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3733 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3734 break;
3735 /** @todo Read the AMD and Intel exception reference... */
3736 }
3737}
3738
3739
3740/**
3741 * Implements exceptions and interrupts for real mode.
3742 *
3743 * @returns VBox strict status code.
3744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3745 * @param cbInstr The number of bytes to offset rIP by in the return
3746 * address.
3747 * @param u8Vector The interrupt / exception vector number.
3748 * @param fFlags The flags.
3749 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3750 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3751 */
3752IEM_STATIC VBOXSTRICTRC
3753iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3754 uint8_t cbInstr,
3755 uint8_t u8Vector,
3756 uint32_t fFlags,
3757 uint16_t uErr,
3758 uint64_t uCr2)
3759{
3760 NOREF(uErr); NOREF(uCr2);
3761 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3762
3763 /*
3764 * Read the IDT entry.
3765 */
3766 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3767 {
3768 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3769 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3770 }
3771 RTFAR16 Idte;
3772 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3773 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3774 {
3775 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3776 return rcStrict;
3777 }
3778
3779 /*
3780 * Push the stack frame.
3781 */
3782 uint16_t *pu16Frame;
3783 uint64_t uNewRsp;
3784 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3785 if (rcStrict != VINF_SUCCESS)
3786 return rcStrict;
3787
3788 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3789#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3790 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3791 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3792 fEfl |= UINT16_C(0xf000);
3793#endif
3794 pu16Frame[2] = (uint16_t)fEfl;
3795 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3796 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3797 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3798 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3799 return rcStrict;
3800
3801 /*
3802 * Load the vector address into cs:ip and make exception specific state
3803 * adjustments.
3804 */
3805 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3806 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3807 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3808 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3809 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3810 pVCpu->cpum.GstCtx.rip = Idte.off;
3811 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3812 IEMMISC_SET_EFL(pVCpu, fEfl);
3813
3814 /** @todo do we actually do this in real mode? */
3815 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3816 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3817
3818 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3819}
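
/*
 * For reference, the real-mode dispatch above uses the 4-byte IVT entry at
 * IDTR.base + vector * 4 (offset word, then segment word) and pushes a 6-byte
 * frame; with the indices used above:
 *     pu16Frame[2] = FLAGS (low 16 bits of EFLAGS)
 *     pu16Frame[1] = CS
 *     pu16Frame[0] = IP (the return address)
 */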
3820
3821
3822/**
3823 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3824 *
3825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3826 * @param pSReg Pointer to the segment register.
3827 */
3828IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3829{
3830 pSReg->Sel = 0;
3831 pSReg->ValidSel = 0;
3832 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3833 {
3834 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3835 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3836 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3837 }
3838 else
3839 {
3840 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3841 /** @todo check this on AMD-V */
3842 pSReg->u64Base = 0;
3843 pSReg->u32Limit = 0;
3844 }
3845}
3846
3847
3848/**
3849 * Loads a segment selector during a task switch in V8086 mode.
3850 *
3851 * @param pSReg Pointer to the segment register.
3852 * @param uSel The selector value to load.
3853 */
3854IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3855{
3856 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3857 pSReg->Sel = uSel;
3858 pSReg->ValidSel = uSel;
3859 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3860 pSReg->u64Base = uSel << 4;
3861 pSReg->u32Limit = 0xffff;
3862 pSReg->Attr.u = 0xf3;
3863}
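
/*
 * Note: the fixed attribute value 0xf3 used above decodes as P=1, DPL=3,
 * S=1 (code/data descriptor) and type=3 (read/write, accessed data).
 */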
3864
3865
3866/**
3867 * Loads a NULL data selector into a selector register, both the hidden and
3868 * visible parts, in protected mode.
3869 *
3870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3871 * @param pSReg Pointer to the segment register.
3872 * @param uRpl The RPL.
3873 */
3874IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3875{
3876 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3877 * data selector in protected mode. */
3878 pSReg->Sel = uRpl;
3879 pSReg->ValidSel = uRpl;
3880 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3881 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3882 {
3883 /* VT-x (Intel 3960x) observed doing something like this. */
3884 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3885 pSReg->u32Limit = UINT32_MAX;
3886 pSReg->u64Base = 0;
3887 }
3888 else
3889 {
3890 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3891 pSReg->u32Limit = 0;
3892 pSReg->u64Base = 0;
3893 }
3894}
3895
3896
3897/**
3898 * Loads a segment selector during a task switch in protected mode.
3899 *
3900 * In this task switch scenario, we would throw \#TS exceptions rather than
3901 * \#GPs.
3902 *
3903 * @returns VBox strict status code.
3904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3905 * @param pSReg Pointer to the segment register.
3906 * @param uSel The new selector value.
3907 *
3908 * @remarks This does _not_ handle CS or SS.
3909 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3910 */
3911IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3912{
3913 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3914
3915 /* Null data selector. */
3916 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3917 {
3918 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3920 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3921 return VINF_SUCCESS;
3922 }
3923
3924 /* Fetch the descriptor. */
3925 IEMSELDESC Desc;
3926 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3927 if (rcStrict != VINF_SUCCESS)
3928 {
3929 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3930 VBOXSTRICTRC_VAL(rcStrict)));
3931 return rcStrict;
3932 }
3933
3934 /* Must be a data segment or readable code segment. */
3935 if ( !Desc.Legacy.Gen.u1DescType
3936 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3937 {
3938 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3939 Desc.Legacy.Gen.u4Type));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /* Check privileges for data segments and non-conforming code segments. */
3944 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3945 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3946 {
3947 /* The RPL and the new CPL must be less than or equal to the DPL. */
3948 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3949 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3950 {
3951 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3952 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3953 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3954 }
3955 }
3956
3957 /* Is it there? */
3958 if (!Desc.Legacy.Gen.u1Present)
3959 {
3960 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3961 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3962 }
3963
3964 /* The base and limit. */
3965 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3966 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3967
3968 /*
3969 * Ok, everything checked out fine. Now set the accessed bit before
3970 * committing the result into the registers.
3971 */
3972 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3973 {
3974 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3975 if (rcStrict != VINF_SUCCESS)
3976 return rcStrict;
3977 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3978 }
3979
3980 /* Commit */
3981 pSReg->Sel = uSel;
3982 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3983 pSReg->u32Limit = cbLimit;
3984 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3985 pSReg->ValidSel = uSel;
3986 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3987 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3988 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3989
3990 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3991 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3992 return VINF_SUCCESS;
3993}
3994
3995
3996/**
3997 * Performs a task switch.
3998 *
3999 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4000 * caller is responsible for performing the necessary checks (like DPL, TSS
4001 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4002 * reference for JMP, CALL, IRET.
4003 *
4004 * If the task switch is due to a software interrupt or hardware exception,
4005 * the caller is responsible for validating the TSS selector and descriptor. See
4006 * Intel Instruction reference for INT n.
4007 *
4008 * @returns VBox strict status code.
4009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4010 * @param enmTaskSwitch The cause of the task switch.
4011 * @param uNextEip The EIP effective after the task switch.
4012 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4013 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4014 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4015 * @param SelTSS The TSS selector of the new task.
4016 * @param pNewDescTSS Pointer to the new TSS descriptor.
4017 */
4018IEM_STATIC VBOXSTRICTRC
4019iemTaskSwitch(PVMCPU pVCpu,
4020 IEMTASKSWITCH enmTaskSwitch,
4021 uint32_t uNextEip,
4022 uint32_t fFlags,
4023 uint16_t uErr,
4024 uint64_t uCr2,
4025 RTSEL SelTSS,
4026 PIEMSELDESC pNewDescTSS)
4027{
4028 Assert(!IEM_IS_REAL_MODE(pVCpu));
4029 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4030 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4031
4032 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4034 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4035 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4036 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4037
4038 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4039 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4040
4041 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4042 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4043
4044 /* Update CR2 in case it's a page-fault. */
4045 /** @todo This should probably be done much earlier in IEM/PGM. See
4046 * @bugref{5653#c49}. */
4047 if (fFlags & IEM_XCPT_FLAGS_CR2)
4048 pVCpu->cpum.GstCtx.cr2 = uCr2;
4049
4050 /*
4051 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4052 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4053 */
4054 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4055 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4056 if (uNewTSSLimit < uNewTSSLimitMin)
4057 {
4058 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4059 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4060 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4061 }
4062
4063 /*
4064 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4065 * The new TSS must have been read and validated (DPL, limits etc.) before a
4066 * task-switch VM-exit commences.
4067 *
4068 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4069 */
4070 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4071 {
4072 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4073 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4074 }
4075
4076 /*
4077 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4078 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4079 */
4080 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4081 {
4082 uint32_t const uExitInfo1 = SelTSS;
4083 uint32_t uExitInfo2 = uErr;
4084 switch (enmTaskSwitch)
4085 {
4086 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4087 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4088 default: break;
4089 }
4090 if (fFlags & IEM_XCPT_FLAGS_ERR)
4091 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4092 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4093 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4094
4095 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4096 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4097 RT_NOREF2(uExitInfo1, uExitInfo2);
4098 }
4099
4100 /*
4101 * Check the current TSS limit. The last bytes written to the current TSS during the
4102 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4103 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4104 *
4105 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4106 * end up with smaller than "legal" TSS limits.
4107 */
4108 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4109 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4110 if (uCurTSSLimit < uCurTSSLimitMin)
4111 {
4112 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4113 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4114 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4115 }
4116
4117 /*
4118 * Verify that the new TSS can be accessed and map it. Map only the required contents
4119 * and not the entire TSS.
4120 */
4121 void *pvNewTSS;
4122 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4123 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4124 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4125 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4126 * not perform correct translation if this happens. See Intel spec. 7.2.1
4127 * "Task-State Segment" */
4128 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4132 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135
4136 /*
4137 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4138 */
4139 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4140 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4141 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4142 {
4143 PX86DESC pDescCurTSS;
4144 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4145 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4149 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152
4153 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4154 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4155 if (rcStrict != VINF_SUCCESS)
4156 {
4157 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4158 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4159 return rcStrict;
4160 }
4161
4162 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4163 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4164 {
4165 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4166 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4167 u32EFlags &= ~X86_EFL_NT;
4168 }
4169 }
4170
4171 /*
4172 * Save the CPU state into the current TSS.
4173 */
4174 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4175 if (GCPtrNewTSS == GCPtrCurTSS)
4176 {
4177 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4178 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4179 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4180 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4181 pVCpu->cpum.GstCtx.ldtr.Sel));
4182 }
4183 if (fIsNewTSS386)
4184 {
4185 /*
4186 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4187 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4188 */
4189 void *pvCurTSS32;
4190 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4191 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4192 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4193 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4194 if (rcStrict != VINF_SUCCESS)
4195 {
4196 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4197 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4198 return rcStrict;
4199 }
4200
4201 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4202 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4203 pCurTSS32->eip = uNextEip;
4204 pCurTSS32->eflags = u32EFlags;
4205 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4206 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4207 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4208 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4209 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4210 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4211 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4212 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4213 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4214 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4215 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4216 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4217 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4218 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4219
4220 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4221 if (rcStrict != VINF_SUCCESS)
4222 {
4223 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4224 VBOXSTRICTRC_VAL(rcStrict)));
4225 return rcStrict;
4226 }
4227 }
4228 else
4229 {
4230 /*
4231 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4232 */
4233 void *pvCurTSS16;
4234 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4235 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4236 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4237 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4238 if (rcStrict != VINF_SUCCESS)
4239 {
4240 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4241 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4242 return rcStrict;
4243 }
4244
4245 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4246 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4247 pCurTSS16->ip = uNextEip;
4248 pCurTSS16->flags = u32EFlags;
4249 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4250 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4251 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4252 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4253 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4254 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4255 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4256 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4257 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4258 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4259 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4260 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4261
4262 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4263 if (rcStrict != VINF_SUCCESS)
4264 {
4265 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4266 VBOXSTRICTRC_VAL(rcStrict)));
4267 return rcStrict;
4268 }
4269 }
4270
4271 /*
4272 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4273 */
4274 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4275 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4276 {
4277 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4278 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4279 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4280 }
4281
4282 /*
4283 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4284 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4285 */
4286 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4287 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4288 bool fNewDebugTrap;
4289 if (fIsNewTSS386)
4290 {
4291 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4292 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4293 uNewEip = pNewTSS32->eip;
4294 uNewEflags = pNewTSS32->eflags;
4295 uNewEax = pNewTSS32->eax;
4296 uNewEcx = pNewTSS32->ecx;
4297 uNewEdx = pNewTSS32->edx;
4298 uNewEbx = pNewTSS32->ebx;
4299 uNewEsp = pNewTSS32->esp;
4300 uNewEbp = pNewTSS32->ebp;
4301 uNewEsi = pNewTSS32->esi;
4302 uNewEdi = pNewTSS32->edi;
4303 uNewES = pNewTSS32->es;
4304 uNewCS = pNewTSS32->cs;
4305 uNewSS = pNewTSS32->ss;
4306 uNewDS = pNewTSS32->ds;
4307 uNewFS = pNewTSS32->fs;
4308 uNewGS = pNewTSS32->gs;
4309 uNewLdt = pNewTSS32->selLdt;
4310 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4311 }
4312 else
4313 {
4314 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4315 uNewCr3 = 0;
4316 uNewEip = pNewTSS16->ip;
4317 uNewEflags = pNewTSS16->flags;
4318 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4319 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4320 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4321 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4322 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4323 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4324 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4325 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4326 uNewES = pNewTSS16->es;
4327 uNewCS = pNewTSS16->cs;
4328 uNewSS = pNewTSS16->ss;
4329 uNewDS = pNewTSS16->ds;
4330 uNewFS = 0;
4331 uNewGS = 0;
4332 uNewLdt = pNewTSS16->selLdt;
4333 fNewDebugTrap = false;
4334 }
4335
4336 if (GCPtrNewTSS == GCPtrCurTSS)
4337 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4338 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4339
4340 /*
4341 * We're done accessing the new TSS.
4342 */
4343 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4344 if (rcStrict != VINF_SUCCESS)
4345 {
4346 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4347 return rcStrict;
4348 }
4349
4350 /*
4351 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4352 */
4353 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4354 {
4355 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4356 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4357 if (rcStrict != VINF_SUCCESS)
4358 {
4359 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4360 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4361 return rcStrict;
4362 }
4363
4364 /* Check that the descriptor indicates the new TSS is available (not busy). */
4365 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4366 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4367 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4368
4369 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4370 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4371 if (rcStrict != VINF_SUCCESS)
4372 {
4373 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4374 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4375 return rcStrict;
4376 }
4377 }
4378
4379 /*
4380 * From this point on, we're technically in the new task. We will defer exceptions
4381 * until the completion of the task switch but before executing any instructions in the new task.
4382 */
4383 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4384 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4385 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4386 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4387 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4388 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4389 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4390
4391 /* Set the busy bit in TR. */
4392 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4393 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4394 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4395 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4396 {
4397 uNewEflags |= X86_EFL_NT;
4398 }
4399
4400 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4401 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4402 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4403
4404 pVCpu->cpum.GstCtx.eip = uNewEip;
4405 pVCpu->cpum.GstCtx.eax = uNewEax;
4406 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4407 pVCpu->cpum.GstCtx.edx = uNewEdx;
4408 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4409 pVCpu->cpum.GstCtx.esp = uNewEsp;
4410 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4411 pVCpu->cpum.GstCtx.esi = uNewEsi;
4412 pVCpu->cpum.GstCtx.edi = uNewEdi;
4413
4414 uNewEflags &= X86_EFL_LIVE_MASK;
4415 uNewEflags |= X86_EFL_RA1_MASK;
4416 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4417
4418 /*
4419 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4420 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4421 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4422 */
4423 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4424 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4427 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4428
4429 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4430 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4431
4432 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4433 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4434
4435 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4436 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4437
4438 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4439 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4441
4442 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4443 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4444 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4445 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4446
4447 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4448 {
4449 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4454 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4455 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4456 }
4457
4458 /*
4459 * Switch CR3 for the new task.
4460 */
4461 if ( fIsNewTSS386
4462 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4463 {
4464 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4465 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4466 AssertRCSuccessReturn(rc, rc);
4467
4468 /* Inform PGM. */
4469 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4470 AssertRCReturn(rc, rc);
4471 /* ignore informational status codes */
4472
4473 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4474 }
4475
4476 /*
4477 * Switch LDTR for the new task.
4478 */
4479 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4480 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4481 else
4482 {
4483 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4484
4485 IEMSELDESC DescNewLdt;
4486 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4487 if (rcStrict != VINF_SUCCESS)
4488 {
4489 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4490 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4491 return rcStrict;
4492 }
4493 if ( !DescNewLdt.Legacy.Gen.u1Present
4494 || DescNewLdt.Legacy.Gen.u1DescType
4495 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4496 {
4497 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4498 uNewLdt, DescNewLdt.Legacy.u));
4499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4500 }
4501
4502 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4503 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4504 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4505 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4506 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4507 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4508 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4510 }
4511
4512 IEMSELDESC DescSS;
4513 if (IEM_IS_V86_MODE(pVCpu))
4514 {
4515 pVCpu->iem.s.uCpl = 3;
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4520 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4521 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4522
4523 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4524 DescSS.Legacy.u = 0;
4525 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4526 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4527 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4528 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4529 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4530 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4531 DescSS.Legacy.Gen.u2Dpl = 3;
4532 }
4533 else
4534 {
4535 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4536
4537 /*
4538 * Load the stack segment for the new task.
4539 */
4540 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4541 {
4542 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* Fetch the descriptor. */
4547 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4548 if (rcStrict != VINF_SUCCESS)
4549 {
4550 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4551 VBOXSTRICTRC_VAL(rcStrict)));
4552 return rcStrict;
4553 }
4554
4555 /* SS must be a data segment and writable. */
4556 if ( !DescSS.Legacy.Gen.u1DescType
4557 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4558 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4559 {
4560 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4561 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4562 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4563 }
4564
4565         /* The SS.RPL, SS.DPL and CS.RPL (which becomes the new CPL) must all be equal. */
4566 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4567 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4568 {
4569 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4570 uNewCpl));
4571 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 /* Is it there? */
4575 if (!DescSS.Legacy.Gen.u1Present)
4576 {
4577 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4578 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4579 }
4580
4581 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4582 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4583
4584 /* Set the accessed bit before committing the result into SS. */
4585 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4586 {
4587 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4588 if (rcStrict != VINF_SUCCESS)
4589 return rcStrict;
4590 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4591 }
4592
4593 /* Commit SS. */
4594 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4595 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4596 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4597 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4598 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4599 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4600 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4601
4602 /* CPL has changed, update IEM before loading rest of segments. */
4603 pVCpu->iem.s.uCpl = uNewCpl;
4604
4605 /*
4606 * Load the data segments for the new task.
4607 */
4608 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4609 if (rcStrict != VINF_SUCCESS)
4610 return rcStrict;
4611 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4615 if (rcStrict != VINF_SUCCESS)
4616 return rcStrict;
4617 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4618 if (rcStrict != VINF_SUCCESS)
4619 return rcStrict;
4620
4621 /*
4622 * Load the code segment for the new task.
4623 */
4624 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4625 {
4626 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4627 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4628 }
4629
4630 /* Fetch the descriptor. */
4631 IEMSELDESC DescCS;
4632 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4633 if (rcStrict != VINF_SUCCESS)
4634 {
4635 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4636 return rcStrict;
4637 }
4638
4639 /* CS must be a code segment. */
4640 if ( !DescCS.Legacy.Gen.u1DescType
4641 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4642 {
4643 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4644 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4645 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4646 }
4647
4648 /* For conforming CS, DPL must be less than or equal to the RPL. */
4649 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4650 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4651 {
4652                 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4653 DescCS.Legacy.Gen.u2Dpl));
4654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4655 }
4656
4657 /* For non-conforming CS, DPL must match RPL. */
4658 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4659 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4660 {
4661                 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4662 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4664 }
4665
4666 /* Is it there? */
4667 if (!DescCS.Legacy.Gen.u1Present)
4668 {
4669 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4670 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4671 }
4672
4673 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4674 u64Base = X86DESC_BASE(&DescCS.Legacy);
4675
4676 /* Set the accessed bit before committing the result into CS. */
4677 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4678 {
4679 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4680 if (rcStrict != VINF_SUCCESS)
4681 return rcStrict;
4682 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4683 }
4684
4685 /* Commit CS. */
4686 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4687 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4688 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4689 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4690 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4691 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4693 }
4694
4695 /** @todo Debug trap. */
4696 if (fIsNewTSS386 && fNewDebugTrap)
4697 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4698
4699 /*
4700 * Construct the error code masks based on what caused this task switch.
4701 * See Intel Instruction reference for INT.
4702 */
4703 uint16_t uExt;
4704 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4705 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4706 {
4707 uExt = 1;
4708 }
4709 else
4710 uExt = 0;
4711
4712 /*
4713 * Push any error code on to the new stack.
4714 */
4715 if (fFlags & IEM_XCPT_FLAGS_ERR)
4716 {
4717 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4718 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4719 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4720
4721 /* Check that there is sufficient space on the stack. */
4722 /** @todo Factor out segment limit checking for normal/expand down segments
4723 * into a separate function. */
4724 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4725 {
4726 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4727 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4728 {
4729 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4730 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4731 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4732 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4733 }
4734 }
4735 else
4736 {
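                /* Expand-down segment: valid addresses lie above the limit (up to 64K or 4G depending on the D/B bit),
                   hence the inverted checks below. */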
4737 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4738 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4739 {
4740 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4741 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4742 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4743 }
4744 }
4745
4746
4747 if (fIsNewTSS386)
4748 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4749 else
4750 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4751 if (rcStrict != VINF_SUCCESS)
4752 {
4753 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4754 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4755 return rcStrict;
4756 }
4757 }
4758
4759 /* Check the new EIP against the new CS limit. */
4760 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4761 {
4762         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4763 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4764 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4765 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4766 }
4767
4768 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4769 pVCpu->cpum.GstCtx.ss.Sel));
4770 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4771}
4772
4773
4774/**
4775 * Implements exceptions and interrupts for protected mode.
4776 *
4777 * @returns VBox strict status code.
4778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4779 * @param cbInstr The number of bytes to offset rIP by in the return
4780 * address.
4781 * @param u8Vector The interrupt / exception vector number.
4782 * @param fFlags The flags.
4783 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4784 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4785 */
4786IEM_STATIC VBOXSTRICTRC
4787iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4788 uint8_t cbInstr,
4789 uint8_t u8Vector,
4790 uint32_t fFlags,
4791 uint16_t uErr,
4792 uint64_t uCr2)
4793{
4794 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4795
4796 /*
4797 * Read the IDT entry.
4798 */
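    /* Each protected-mode IDT entry is 8 bytes wide; the whole descriptor must lie within the IDT limit. */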
4799 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4802 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804 X86DESC Idte;
4805 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4806 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4807 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4808 {
4809 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4810 return rcStrict;
4811 }
4812 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4813 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4814 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4815
4816 /*
4817 * Check the descriptor type, DPL and such.
4818 * ASSUMES this is done in the same order as described for call-gate calls.
4819 */
4820 if (Idte.Gate.u1DescType)
4821 {
4822 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4823 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4824 }
4825 bool fTaskGate = false;
4826     uint8_t f32BitGate = true; /* Note: also doubles as the shift count (0 or 1) used when sizing the stack frame below. */
4827 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4828 switch (Idte.Gate.u4Type)
4829 {
4830 case X86_SEL_TYPE_SYS_UNDEFINED:
4831 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4832 case X86_SEL_TYPE_SYS_LDT:
4833 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4834 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4835 case X86_SEL_TYPE_SYS_UNDEFINED2:
4836 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4837 case X86_SEL_TYPE_SYS_UNDEFINED3:
4838 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4839 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4840 case X86_SEL_TYPE_SYS_UNDEFINED4:
4841 {
4842 /** @todo check what actually happens when the type is wrong...
4843 * esp. call gates. */
4844 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4845 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4846 }
4847
4848 case X86_SEL_TYPE_SYS_286_INT_GATE:
4849 f32BitGate = false;
4850 RT_FALL_THRU();
4851 case X86_SEL_TYPE_SYS_386_INT_GATE:
4852 fEflToClear |= X86_EFL_IF;
4853 break;
4854
4855 case X86_SEL_TYPE_SYS_TASK_GATE:
4856 fTaskGate = true;
4857#ifndef IEM_IMPLEMENTS_TASKSWITCH
4858 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4859#endif
4860 break;
4861
4862 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4863             f32BitGate = false; RT_FALL_THRU();
4864 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4865 break;
4866
4867 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4868 }
4869
4870 /* Check DPL against CPL if applicable. */
4871 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4872 {
4873 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4876 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4877 }
4878 }
4879
4880 /* Is it there? */
4881 if (!Idte.Gate.u1Present)
4882 {
4883 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4884 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4885 }
4886
4887 /* Is it a task-gate? */
4888 if (fTaskGate)
4889 {
4890 /*
4891 * Construct the error code masks based on what caused this task switch.
4892 * See Intel Instruction reference for INT.
4893 */
4894 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4895 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4896 RTSEL SelTSS = Idte.Gate.u16Sel;
4897
4898 /*
4899 * Fetch the TSS descriptor in the GDT.
4900 */
4901 IEMSELDESC DescTSS;
4902 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4903 if (rcStrict != VINF_SUCCESS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4906 VBOXSTRICTRC_VAL(rcStrict)));
4907 return rcStrict;
4908 }
4909
4910 /* The TSS descriptor must be a system segment and be available (not busy). */
4911 if ( DescTSS.Legacy.Gen.u1DescType
4912 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4913 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4914 {
4915 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4916 u8Vector, SelTSS, DescTSS.Legacy.au64));
4917 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4918 }
4919
4920 /* The TSS must be present. */
4921 if (!DescTSS.Legacy.Gen.u1Present)
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4924 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4925 }
4926
4927 /* Do the actual task switch. */
4928 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4929 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4930 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4931 }
4932
4933 /* A null CS is bad. */
4934 RTSEL NewCS = Idte.Gate.u16Sel;
4935 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4936 {
4937 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4938 return iemRaiseGeneralProtectionFault0(pVCpu);
4939 }
4940
4941 /* Fetch the descriptor for the new CS. */
4942 IEMSELDESC DescCS;
4943 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4944 if (rcStrict != VINF_SUCCESS)
4945 {
4946 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4947 return rcStrict;
4948 }
4949
4950 /* Must be a code segment. */
4951 if (!DescCS.Legacy.Gen.u1DescType)
4952 {
4953 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4954 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4955 }
4956 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4957 {
4958 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4959 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4960 }
4961
4962 /* Don't allow lowering the privilege level. */
4963 /** @todo Does the lowering of privileges apply to software interrupts
4964 * only? This has bearings on the more-privileged or
4965 * same-privilege stack behavior further down. A testcase would
4966 * be nice. */
4967 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4968 {
4969 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4970 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4971 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4972 }
4973
4974 /* Make sure the selector is present. */
4975 if (!DescCS.Legacy.Gen.u1Present)
4976 {
4977 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4978 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4979 }
4980
4981 /* Check the new EIP against the new CS limit. */
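    /* A 286 gate only carries a 16-bit offset; a 386 gate supplies the full 32-bit offset. */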
4982 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4983 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4984 ? Idte.Gate.u16OffsetLow
4985 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4986 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4987 if (uNewEip > cbLimitCS)
4988 {
4989 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4990 u8Vector, uNewEip, cbLimitCS, NewCS));
4991 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4992 }
4993 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4994
4995 /* Calc the flag image to push. */
4996 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4997 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4998 fEfl &= ~X86_EFL_RF;
4999 else
5000 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5001
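    /* A conforming handler CS keeps the current CPL; a non-conforming one runs at its own DPL. */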
5002 /* From V8086 mode only go to CPL 0. */
5003 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5004 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5005 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5006 {
5007 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5008 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5009 }
5010
5011 /*
5012 * If the privilege level changes, we need to get a new stack from the TSS.
5013 * This in turns means validating the new SS and ESP...
5014 */
5015 if (uNewCpl != pVCpu->iem.s.uCpl)
5016 {
5017 RTSEL NewSS;
5018 uint32_t uNewEsp;
5019 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5020 if (rcStrict != VINF_SUCCESS)
5021 return rcStrict;
5022
5023 IEMSELDESC DescSS;
5024 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5025 if (rcStrict != VINF_SUCCESS)
5026 return rcStrict;
5027 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5028 if (!DescSS.Legacy.Gen.u1DefBig)
5029 {
5030 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5031 uNewEsp = (uint16_t)uNewEsp;
5032 }
5033
5034 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5035
5036 /* Check that there is sufficient space for the stack frame. */
5037 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
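        /* The frame holds EIP, CS, EFLAGS, the old ESP and SS (plus ES, DS, FS, GS when interrupting V86 code) and
           optionally an error code; entries are 2 bytes for a 16-bit gate, 4 bytes for a 32-bit one (the << f32BitGate). */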
5038 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5039 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5040 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
5041
5042 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5043 {
5044 if ( uNewEsp - 1 > cbLimitSS
5045 || uNewEsp < cbStackFrame)
5046 {
5047 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5048 u8Vector, NewSS, uNewEsp, cbStackFrame));
5049 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5050 }
5051 }
5052 else
5053 {
5054 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5055 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5056 {
5057 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5058 u8Vector, NewSS, uNewEsp, cbStackFrame));
5059 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5060 }
5061 }
5062
5063 /*
5064 * Start making changes.
5065 */
5066
5067 /* Set the new CPL so that stack accesses use it. */
5068 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5069 pVCpu->iem.s.uCpl = uNewCpl;
5070
5071 /* Create the stack frame. */
5072 RTPTRUNION uStackFrame;
5073 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5074 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5075 if (rcStrict != VINF_SUCCESS)
5076 return rcStrict;
5077 void * const pvStackFrame = uStackFrame.pv;
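        /* Frame layout, lowest address first: [error code,] EIP, CS, EFLAGS, old ESP, old SS, and for V86 also ES, DS, FS, GS. */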
5078 if (f32BitGate)
5079 {
5080 if (fFlags & IEM_XCPT_FLAGS_ERR)
5081 *uStackFrame.pu32++ = uErr;
5082 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5083 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5084 uStackFrame.pu32[2] = fEfl;
5085 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5086 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5087 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5088 if (fEfl & X86_EFL_VM)
5089 {
5090 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5091 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5092 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5093 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5094 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5095 }
5096 }
5097 else
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu16++ = uErr;
5101 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5102 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5103 uStackFrame.pu16[2] = fEfl;
5104 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5105 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5106 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5107 if (fEfl & X86_EFL_VM)
5108 {
5109 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5110 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5111 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5112 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5113 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5114 }
5115 }
5116 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5117 if (rcStrict != VINF_SUCCESS)
5118 return rcStrict;
5119
5120 /* Mark the selectors 'accessed' (hope this is the correct time). */
5121     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5122 * after pushing the stack frame? (Write protect the gdt + stack to
5123 * find out.) */
5124 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5125 {
5126 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5127 if (rcStrict != VINF_SUCCESS)
5128 return rcStrict;
5129 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5130 }
5131
5132 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5133 {
5134 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5135 if (rcStrict != VINF_SUCCESS)
5136 return rcStrict;
5137 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5138 }
5139
5140 /*
5141          * Start committing the register changes (joins with the DPL=CPL branch).
5142 */
5143 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5144 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5145 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5146 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5147 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5148 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5149 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5150 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5151 * SP is loaded).
5152 * Need to check the other combinations too:
5153 * - 16-bit TSS, 32-bit handler
5154 * - 32-bit TSS, 16-bit handler */
5155 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5156 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5157 else
5158 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5159
5160 if (fEfl & X86_EFL_VM)
5161 {
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5163 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5164 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5165 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5166 }
5167 }
5168 /*
5169 * Same privilege, no stack change and smaller stack frame.
5170 */
5171 else
5172 {
5173 uint64_t uNewRsp;
5174 RTPTRUNION uStackFrame;
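        /* No stack switch here, so only EIP, CS and EFLAGS (plus an optional error code) are pushed. */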
5175 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5176 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5177 if (rcStrict != VINF_SUCCESS)
5178 return rcStrict;
5179 void * const pvStackFrame = uStackFrame.pv;
5180
5181 if (f32BitGate)
5182 {
5183 if (fFlags & IEM_XCPT_FLAGS_ERR)
5184 *uStackFrame.pu32++ = uErr;
5185 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5186 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5187 uStackFrame.pu32[2] = fEfl;
5188 }
5189 else
5190 {
5191 if (fFlags & IEM_XCPT_FLAGS_ERR)
5192 *uStackFrame.pu16++ = uErr;
5193 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5194 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5195 uStackFrame.pu16[2] = fEfl;
5196 }
5197 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5198 if (rcStrict != VINF_SUCCESS)
5199 return rcStrict;
5200
5201 /* Mark the CS selector as 'accessed'. */
5202 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5203 {
5204 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5205 if (rcStrict != VINF_SUCCESS)
5206 return rcStrict;
5207 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5208 }
5209
5210 /*
5211 * Start committing the register changes (joins with the other branch).
5212 */
5213 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5214 }
5215
5216 /* ... register committing continues. */
5217 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5218 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5219 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5220 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5221 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5222 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5223
5224 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5225 fEfl &= ~fEflToClear;
5226 IEMMISC_SET_EFL(pVCpu, fEfl);
5227
5228 if (fFlags & IEM_XCPT_FLAGS_CR2)
5229 pVCpu->cpum.GstCtx.cr2 = uCr2;
5230
5231 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5232 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5233
5234 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5235}
5236
5237
5238/**
5239 * Implements exceptions and interrupts for long mode.
5240 *
5241 * @returns VBox strict status code.
5242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5243 * @param cbInstr The number of bytes to offset rIP by in the return
5244 * address.
5245 * @param u8Vector The interrupt / exception vector number.
5246 * @param fFlags The flags.
5247 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5248 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5249 */
5250IEM_STATIC VBOXSTRICTRC
5251iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5252 uint8_t cbInstr,
5253 uint8_t u8Vector,
5254 uint32_t fFlags,
5255 uint16_t uErr,
5256 uint64_t uCr2)
5257{
5258 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5259
5260 /*
5261 * Read the IDT entry.
5262 */
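    /* Long-mode IDT entries are 16 bytes each (vs. 8 bytes in protected mode). */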
5263 uint16_t offIdt = (uint16_t)u8Vector << 4;
5264 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5267 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5268 }
5269 X86DESC64 Idte;
5270 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5271 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5272 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5273 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5276 return rcStrict;
5277 }
5278 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5279 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5280 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5281
5282 /*
5283 * Check the descriptor type, DPL and such.
5284 * ASSUMES this is done in the same order as described for call-gate calls.
5285 */
5286 if (Idte.Gate.u1DescType)
5287 {
5288 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5289 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5290 }
5291 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5292 switch (Idte.Gate.u4Type)
5293 {
5294 case AMD64_SEL_TYPE_SYS_INT_GATE:
5295 fEflToClear |= X86_EFL_IF;
5296 break;
5297 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5298 break;
5299
5300 default:
5301 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5302 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5303 }
5304
5305 /* Check DPL against CPL if applicable. */
5306 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5307 {
5308 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5309 {
5310 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5311 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5312 }
5313 }
5314
5315 /* Is it there? */
5316 if (!Idte.Gate.u1Present)
5317 {
5318 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5319 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5320 }
5321
5322 /* A null CS is bad. */
5323 RTSEL NewCS = Idte.Gate.u16Sel;
5324 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5325 {
5326 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5327 return iemRaiseGeneralProtectionFault0(pVCpu);
5328 }
5329
5330 /* Fetch the descriptor for the new CS. */
5331 IEMSELDESC DescCS;
5332 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5333 if (rcStrict != VINF_SUCCESS)
5334 {
5335 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5336 return rcStrict;
5337 }
5338
5339 /* Must be a 64-bit code segment. */
5340 if (!DescCS.Long.Gen.u1DescType)
5341 {
5342 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5343 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5344 }
5345 if ( !DescCS.Long.Gen.u1Long
5346 || DescCS.Long.Gen.u1DefBig
5347 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5348 {
5349 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5350 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5351 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5352 }
5353
5354 /* Don't allow lowering the privilege level. For non-conforming CS
5355 selectors, the CS.DPL sets the privilege level the trap/interrupt
5356 handler runs at. For conforming CS selectors, the CPL remains
5357 unchanged, but the CS.DPL must be <= CPL. */
5358 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5359 * when CPU in Ring-0. Result \#GP? */
5360 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5361 {
5362 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5363 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5364 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5365 }
5366
5367
5368 /* Make sure the selector is present. */
5369 if (!DescCS.Legacy.Gen.u1Present)
5370 {
5371 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5372 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5373 }
5374
5375 /* Check that the new RIP is canonical. */
5376 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5377 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5378 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5379 if (!IEM_IS_CANONICAL(uNewRip))
5380 {
5381 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5382 return iemRaiseGeneralProtectionFault0(pVCpu);
5383 }
5384
5385 /*
5386 * If the privilege level changes or if the IST isn't zero, we need to get
5387 * a new stack from the TSS.
5388 */
5389 uint64_t uNewRsp;
5390 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5391 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5392 if ( uNewCpl != pVCpu->iem.s.uCpl
5393 || Idte.Gate.u3IST != 0)
5394 {
5395 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5396 if (rcStrict != VINF_SUCCESS)
5397 return rcStrict;
5398 }
5399 else
5400 uNewRsp = pVCpu->cpum.GstCtx.rsp;
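    /* In 64-bit mode the CPU aligns the new RSP down to a 16-byte boundary before pushing the frame. */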
5401 uNewRsp &= ~(uint64_t)0xf;
5402
5403 /*
5404 * Calc the flag image to push.
5405 */
5406 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5407 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5408 fEfl &= ~X86_EFL_RF;
5409 else
5410 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5411
5412 /*
5413 * Start making changes.
5414 */
5415 /* Set the new CPL so that stack accesses use it. */
5416 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5417 pVCpu->iem.s.uCpl = uNewCpl;
5418
5419 /* Create the stack frame. */
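    /* Five qwords: RIP, CS, RFLAGS, old RSP and old SS, plus one more for the error code when present. */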
5420 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5421 RTPTRUNION uStackFrame;
5422 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5423 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5424 if (rcStrict != VINF_SUCCESS)
5425 return rcStrict;
5426 void * const pvStackFrame = uStackFrame.pv;
5427
5428 if (fFlags & IEM_XCPT_FLAGS_ERR)
5429 *uStackFrame.pu64++ = uErr;
5430 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5431 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5432 uStackFrame.pu64[2] = fEfl;
5433 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5434 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5435 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5436 if (rcStrict != VINF_SUCCESS)
5437 return rcStrict;
5438
5439     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5440     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5441 * after pushing the stack frame? (Write protect the gdt + stack to
5442 * find out.) */
5443 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5444 {
5445 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5446 if (rcStrict != VINF_SUCCESS)
5447 return rcStrict;
5448 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5449 }
5450
5451 /*
5452      * Start committing the register changes.
5453 */
5454     /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5455 * hidden registers when interrupting 32-bit or 16-bit code! */
5456 if (uNewCpl != uOldCpl)
5457 {
5458 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5459 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5460 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5461 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5462 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5463 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5464 }
5465 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5466 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5467 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5468 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5469 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5470 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5471 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5472 pVCpu->cpum.GstCtx.rip = uNewRip;
5473
5474 fEfl &= ~fEflToClear;
5475 IEMMISC_SET_EFL(pVCpu, fEfl);
5476
5477 if (fFlags & IEM_XCPT_FLAGS_CR2)
5478 pVCpu->cpum.GstCtx.cr2 = uCr2;
5479
5480 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5481 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5482
5483 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5484}
5485
5486
5487/**
5488 * Implements exceptions and interrupts.
5489 *
5490  * All exceptions and interrupts go through this function!
5491 *
5492 * @returns VBox strict status code.
5493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5494 * @param cbInstr The number of bytes to offset rIP by in the return
5495 * address.
5496 * @param u8Vector The interrupt / exception vector number.
5497 * @param fFlags The flags.
5498 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5499 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5500 */
5501DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5502iemRaiseXcptOrInt(PVMCPU pVCpu,
5503 uint8_t cbInstr,
5504 uint8_t u8Vector,
5505 uint32_t fFlags,
5506 uint16_t uErr,
5507 uint64_t uCr2)
5508{
5509 /*
5510 * Get all the state that we might need here.
5511 */
5512 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5513 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5514
5515#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5516 /*
5517 * Flush prefetch buffer
5518 */
5519 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5520#endif
5521
5522 /*
5523 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5524 */
5525 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5526 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5527 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5528 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5529 {
5530 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5531 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5532 u8Vector = X86_XCPT_GP;
5533 uErr = 0;
5534 }
5535#ifdef DBGFTRACE_ENABLED
5536 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5537 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5538 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5539#endif
5540
5541 /*
5542 * Evaluate whether NMI blocking should be in effect.
5543 * Normally, NMI blocking is in effect whenever we inject an NMI.
5544 */
5545 bool fBlockNmi;
5546 if ( u8Vector == X86_XCPT_NMI
5547 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5548 fBlockNmi = true;
5549 else
5550 fBlockNmi = false;
5551
5552#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5553 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5554 {
5555 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5556 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5557 return rcStrict0;
5558
5559 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5560 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5561 {
5562 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5563 fBlockNmi = false;
5564 }
5565 }
5566#endif
5567
5568#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5569 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5570 {
5571 /*
5572 * If the event is being injected as part of VMRUN, it isn't subject to event
5573 * intercepts in the nested-guest. However, secondary exceptions that occur
5574 * during injection of any event -are- subject to exception intercepts.
5575 *
5576 * See AMD spec. 15.20 "Event Injection".
5577 */
5578 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5579 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5580 else
5581 {
5582 /*
5583 * Check and handle if the event being raised is intercepted.
5584 */
5585 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5586 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5587 return rcStrict0;
5588 }
5589 }
5590#endif
5591
5592 /*
5593 * Set NMI blocking if necessary.
5594 */
5595 if ( fBlockNmi
5596 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5597 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5598
5599 /*
5600 * Do recursion accounting.
5601 */
5602 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5603 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5604 if (pVCpu->iem.s.cXcptRecursions == 0)
5605 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5606 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5607 else
5608 {
5609 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5610 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5611 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5612
5613 if (pVCpu->iem.s.cXcptRecursions >= 4)
5614 {
5615#ifdef DEBUG_bird
5616 AssertFailed();
5617#endif
5618 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5619 }
5620
5621 /*
5622 * Evaluate the sequence of recurring events.
5623 */
5624 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5625 NULL /* pXcptRaiseInfo */);
5626 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5627 { /* likely */ }
5628 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5629 {
5630 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5631 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5632 u8Vector = X86_XCPT_DF;
5633 uErr = 0;
5634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5635 /* VMX nested-guest #DF intercept needs to be checked here. */
5636 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5637 {
5638 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5639 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5640 return rcStrict0;
5641 }
5642#endif
5643 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5644 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5645 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5646 }
5647 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5648 {
5649 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5650 return iemInitiateCpuShutdown(pVCpu);
5651 }
5652 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5653 {
5654 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5655 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5656 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5657 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5658 return VERR_EM_GUEST_CPU_HANG;
5659 }
5660 else
5661 {
5662 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5663 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5664 return VERR_IEM_IPE_9;
5665 }
5666
5667 /*
5668          * The 'EXT' bit is set when an exception occurs during delivery of an external
5669          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5670          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5671          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5672 *
5673 * [1] - Intel spec. 6.13 "Error Code"
5674 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5675 * [3] - Intel Instruction reference for INT n.
5676 */
5677 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5678 && (fFlags & IEM_XCPT_FLAGS_ERR)
5679 && u8Vector != X86_XCPT_PF
5680 && u8Vector != X86_XCPT_DF)
5681 {
5682 uErr |= X86_TRAP_ERR_EXTERNAL;
5683 }
5684 }
5685
5686 pVCpu->iem.s.cXcptRecursions++;
5687 pVCpu->iem.s.uCurXcpt = u8Vector;
5688 pVCpu->iem.s.fCurXcpt = fFlags;
5689 pVCpu->iem.s.uCurXcptErr = uErr;
5690 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5691
5692 /*
5693 * Extensive logging.
5694 */
5695#if defined(LOG_ENABLED) && defined(IN_RING3)
5696 if (LogIs3Enabled())
5697 {
5698 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5699 PVM pVM = pVCpu->CTX_SUFF(pVM);
5700 char szRegs[4096];
5701 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5702 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5703 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5704 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5705 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5706 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5707 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5708 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5709 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5710 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5711 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5712 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5713 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5714 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5715 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5716 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5717 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5718 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5719 " efer=%016VR{efer}\n"
5720 " pat=%016VR{pat}\n"
5721 " sf_mask=%016VR{sf_mask}\n"
5722 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5723 " lstar=%016VR{lstar}\n"
5724 " star=%016VR{star} cstar=%016VR{cstar}\n"
5725 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5726 );
5727
5728 char szInstr[256];
5729 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5730 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5731 szInstr, sizeof(szInstr), NULL);
5732 Log3(("%s%s\n", szRegs, szInstr));
5733 }
5734#endif /* LOG_ENABLED */
5735
5736 /*
5737 * Call the mode specific worker function.
5738 */
5739 VBOXSTRICTRC rcStrict;
5740 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5741 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5742 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5743 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5744 else
5745 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5746
5747 /* Flush the prefetch buffer. */
5748#ifdef IEM_WITH_CODE_TLB
5749 pVCpu->iem.s.pbInstrBuf = NULL;
5750#else
5751 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5752#endif
5753
5754 /*
5755 * Unwind.
5756 */
5757 pVCpu->iem.s.cXcptRecursions--;
5758 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5759 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5760 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5761 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5762 pVCpu->iem.s.cXcptRecursions + 1));
5763 return rcStrict;
5764}
5765
5766#ifdef IEM_WITH_SETJMP
5767/**
5768 * See iemRaiseXcptOrInt. Will not return.
5769 */
5770IEM_STATIC DECL_NO_RETURN(void)
5771iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5772 uint8_t cbInstr,
5773 uint8_t u8Vector,
5774 uint32_t fFlags,
5775 uint16_t uErr,
5776 uint64_t uCr2)
5777{
5778 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5779 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5780}
5781#endif
5782
5783
5784/** \#DE - 00. */
5785DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5786{
5787 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5788}
5789
5790
5791/** \#DB - 01.
5792  * @note This automatically clears DR7.GD. */
5793DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5794{
5795 /** @todo set/clear RF. */
5796 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5797 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5798}
5799
5800
5801/** \#BR - 05. */
5802DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5803{
5804 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5805}
5806
5807
5808/** \#UD - 06. */
5809DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5810{
5811 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5812}
5813
5814
5815/** \#NM - 07. */
5816DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5817{
5818 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5819}
5820
5821
5822/** \#TS(err) - 0a. */
5823DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5824{
5825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5826}
5827
5828
5829/** \#TS(tr) - 0a. */
5830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5831{
5832 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5833 pVCpu->cpum.GstCtx.tr.Sel, 0);
5834}
5835
5836
5837/** \#TS(0) - 0a. */
5838DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5839{
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5841 0, 0);
5842}
5843
5844
5845 /** \#TS(sel) - 0a. */
5846DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5847{
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5849 uSel & X86_SEL_MASK_OFF_RPL, 0);
5850}
5851
5852
5853/** \#NP(err) - 0b. */
5854DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5855{
5856 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5857}
5858
5859
5860/** \#NP(sel) - 0b. */
5861DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5862{
5863 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5864 uSel & ~X86_SEL_RPL, 0);
5865}
5866
5867
5868/** \#SS(seg) - 0c. */
5869DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5870{
5871 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5872 uSel & ~X86_SEL_RPL, 0);
5873}
5874
5875
5876/** \#SS(err) - 0c. */
5877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5878{
5879 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5880}
5881
5882
5883/** \#GP(n) - 0d. */
5884DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5885{
5886 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5887}
5888
5889
5890/** \#GP(0) - 0d. */
5891DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5892{
5893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5894}
5895
5896#ifdef IEM_WITH_SETJMP
5897/** \#GP(0) - 0d. */
5898DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5899{
5900 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5901}
5902#endif
5903
5904
5905/** \#GP(sel) - 0d. */
5906DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5907{
5908 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5909 Sel & ~X86_SEL_RPL, 0);
5910}
5911
5912
5913/** \#GP(0) - 0d. */
5914DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5915{
5916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5917}
5918
5919
5920/** \#GP(sel) - 0d. */
5921DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5922{
5923 NOREF(iSegReg); NOREF(fAccess);
5924 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5925 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5926}
5927
5928#ifdef IEM_WITH_SETJMP
5929/** \#GP(sel) - 0d, longjmp. */
5930DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5931{
5932 NOREF(iSegReg); NOREF(fAccess);
5933 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5934 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5935}
5936#endif
5937
5938/** \#GP(sel) - 0d. */
5939DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5940{
5941 NOREF(Sel);
5942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5943}
5944
5945#ifdef IEM_WITH_SETJMP
5946/** \#GP(sel) - 0d, longjmp. */
5947DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5948{
5949 NOREF(Sel);
5950 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5951}
5952#endif
5953
5954
5955/** \#GP(sel) - 0d. */
5956DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5957{
5958 NOREF(iSegReg); NOREF(fAccess);
5959 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5960}
5961
5962#ifdef IEM_WITH_SETJMP
5963/** \#GP(sel) - 0d, longjmp. */
5964DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5965 uint32_t fAccess)
5966{
5967 NOREF(iSegReg); NOREF(fAccess);
5968 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5969}
5970#endif
5971
5972
5973/** \#PF(n) - 0e. */
5974DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5975{
5976 uint16_t uErr;
5977 switch (rc)
5978 {
5979 case VERR_PAGE_NOT_PRESENT:
5980 case VERR_PAGE_TABLE_NOT_PRESENT:
5981 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5982 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5983 uErr = 0;
5984 break;
5985
5986 default:
5987 AssertMsgFailed(("%Rrc\n", rc));
5988 RT_FALL_THRU();
5989 case VERR_ACCESS_DENIED:
5990 uErr = X86_TRAP_PF_P;
5991 break;
5992
5993 /** @todo reserved */
5994 }
5995
5996 if (pVCpu->iem.s.uCpl == 3)
5997 uErr |= X86_TRAP_PF_US;
5998
5999 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6000 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6001 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6002 uErr |= X86_TRAP_PF_ID;
6003
6004#if 0 /* This is so much nonsense, really. Why was it done like that? */
6005    /* Note! RW access callers reporting a WRITE protection fault will clear
6006             the READ flag before calling.  So, read-modify-write accesses (RW)
6007             can safely be reported as READ faults. */
6008 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6009 uErr |= X86_TRAP_PF_RW;
6010#else
6011 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6012 {
6013 if (!(fAccess & IEM_ACCESS_TYPE_READ))
6014 uErr |= X86_TRAP_PF_RW;
6015 }
6016#endif
6017
6018 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6019 uErr, GCPtrWhere);
6020}
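
/*
 * Illustration only (not part of the interpreter): a minimal sketch of how the
 * \#PF error code bits used above combine.  The helper name and the plain
 * boolean parameters are invented for this example; the bit meanings
 * (P = bit 0, W/R = bit 1, U/S = bit 2, I/D = bit 4) are the architectural ones.
 */
#if 0 /* example sketch, never compiled */
static uint16_t iemExampleCalcPageFaultErrCode(bool fProtViolation, bool fWrite, uint8_t uCpl, bool fInstrFetch)
{
    uint16_t uErr = 0;
    if (fProtViolation)     /* The page was present; access rights were violated. */
        uErr |= X86_TRAP_PF_P;
    if (fWrite)             /* Pure write access (see the RW note above). */
        uErr |= X86_TRAP_PF_RW;
    if (uCpl == 3)          /* The access was made from user mode. */
        uErr |= X86_TRAP_PF_US;
    if (fInstrFetch)        /* Instruction fetch (only reported here with PAE + EFER.NXE). */
        uErr |= X86_TRAP_PF_ID;
    return uErr;
}
#endif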
6021
6022#ifdef IEM_WITH_SETJMP
6023/** \#PF(n) - 0e, longjmp. */
6024IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6025{
6026 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6027}
6028#endif
6029
6030
6031/** \#MF(0) - 10. */
6032DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6033{
6034 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6035}
6036
6037
6038/** \#AC(0) - 11. */
6039DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6040{
6041 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6042}
6043
6044
6045/**
6046 * Macro for calling iemCImplRaiseDivideError().
6047 *
6048 * This enables us to add/remove arguments and force different levels of
6049 * inlining as we wish.
6050 *
6051 * @return Strict VBox status code.
6052 */
6053#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6054IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6055{
6056 NOREF(cbInstr);
6057 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6058}
6059
6060
6061/**
6062 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6063 *
6064 * This enables us to add/remove arguments and force different levels of
6065 * inlining as we wish.
6066 *
6067 * @return Strict VBox status code.
6068 */
6069#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6070IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6071{
6072 NOREF(cbInstr);
6073 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6074}
6075
6076
6077/**
6078 * Macro for calling iemCImplRaiseInvalidOpcode().
6079 *
6080 * This enables us to add/remove arguments and force different levels of
6081 * inlining as we wish.
6082 *
6083 * @return Strict VBox status code.
6084 */
6085#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6086IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6087{
6088 NOREF(cbInstr);
6089 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6090}
6091
6092
6093/** @} */
6094
6095
6096/*
6097 *
6098 * Helper routines.
6099 * Helper routines.
6100 * Helper routines.
6101 *
6102 */
6103
6104/**
6105 * Recalculates the effective operand size.
6106 *
6107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6108 */
6109IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6110{
6111 switch (pVCpu->iem.s.enmCpuMode)
6112 {
6113 case IEMMODE_16BIT:
6114 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6115 break;
6116 case IEMMODE_32BIT:
6117 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6118 break;
6119 case IEMMODE_64BIT:
6120 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6121 {
6122 case 0:
6123 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6124 break;
6125 case IEM_OP_PRF_SIZE_OP:
6126 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6127 break;
6128 case IEM_OP_PRF_SIZE_REX_W:
6129 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6130 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6131 break;
6132 }
6133 break;
6134 default:
6135 AssertFailed();
6136 }
6137}
6138
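/*
 * Quick reference for the mapping implemented above (reader aid only, derived
 * directly from the switch):
 *
 *      CPU mode    66h prefix   REX.W   ->  effective operand size
 *      16-bit      no           -           16-bit
 *      16-bit      yes          -           32-bit
 *      32-bit      no           -           32-bit
 *      32-bit      yes          -           16-bit
 *      64-bit      no           no          the default size (usually 32-bit)
 *      64-bit      yes          no          16-bit
 *      64-bit      any          yes         64-bit
 */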
6139
6140/**
6141 * Sets the default operand size to 64-bit and recalculates the effective
6142 * operand size.
6143 *
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 */
6146IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6147{
6148 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6149 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6150 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6151 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6152 else
6153 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6154}
6155
6156
6157/*
6158 *
6159 * Common opcode decoders.
6160 * Common opcode decoders.
6161 * Common opcode decoders.
6162 *
6163 */
6164//#include <iprt/mem.h>
6165
6166/**
6167 * Used to add extra details about a stub case.
6168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6169 */
6170IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6171{
6172#if defined(LOG_ENABLED) && defined(IN_RING3)
6173 PVM pVM = pVCpu->CTX_SUFF(pVM);
6174 char szRegs[4096];
6175 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6176 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6177 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6178 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6179 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6180 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6181 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6182 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6183 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6184 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6185 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6186 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6187 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6188 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6189 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6190 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6191 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6192 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6193 " efer=%016VR{efer}\n"
6194 " pat=%016VR{pat}\n"
6195 " sf_mask=%016VR{sf_mask}\n"
6196 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6197 " lstar=%016VR{lstar}\n"
6198 " star=%016VR{star} cstar=%016VR{cstar}\n"
6199 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6200 );
6201
6202 char szInstr[256];
6203 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6204 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6205 szInstr, sizeof(szInstr), NULL);
6206
6207 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6208#else
6209 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6210#endif
6211}
6212
6213/**
6214 * Complains about a stub.
6215 *
6216 * Two versions of this macro are provided: one for daily use and one for use
6217 * when working on IEM.
6218 */
6219#if 0
6220# define IEMOP_BITCH_ABOUT_STUB() \
6221 do { \
6222 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6223 iemOpStubMsg2(pVCpu); \
6224 RTAssertPanic(); \
6225 } while (0)
6226#else
6227# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6228#endif
6229
6230/** Stubs an opcode. */
6231#define FNIEMOP_STUB(a_Name) \
6232 FNIEMOP_DEF(a_Name) \
6233 { \
6234 RT_NOREF_PV(pVCpu); \
6235 IEMOP_BITCH_ABOUT_STUB(); \
6236 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6237 } \
6238 typedef int ignore_semicolon
6239
6240/** Stubs an opcode. */
6241#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6242 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6243 { \
6244 RT_NOREF_PV(pVCpu); \
6245 RT_NOREF_PV(a_Name0); \
6246 IEMOP_BITCH_ABOUT_STUB(); \
6247 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6248 } \
6249 typedef int ignore_semicolon
6250
6251/** Stubs an opcode which currently should raise \#UD. */
6252#define FNIEMOP_UD_STUB(a_Name) \
6253 FNIEMOP_DEF(a_Name) \
6254 { \
6255 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6256 return IEMOP_RAISE_INVALID_OPCODE(); \
6257 } \
6258 typedef int ignore_semicolon
6259
6260/** Stubs an opcode which currently should raise \#UD. */
6261#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6262 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6263 { \
6264 RT_NOREF_PV(pVCpu); \
6265 RT_NOREF_PV(a_Name0); \
6266 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6267 return IEMOP_RAISE_INVALID_OPCODE(); \
6268 } \
6269 typedef int ignore_semicolon
6270
6271
6272
6273/** @name Register Access.
6274 * @{
6275 */
6276
6277/**
6278 * Gets a reference (pointer) to the specified hidden segment register.
6279 *
6280 * @returns Hidden register reference.
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 * @param iSegReg The segment register.
6283 */
6284IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6285{
6286 Assert(iSegReg < X86_SREG_COUNT);
6287 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6288 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6289
6290#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6291 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6292 { /* likely */ }
6293 else
6294 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6295#else
6296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6297#endif
6298 return pSReg;
6299}
6300
6301
6302/**
6303 * Ensures that the given hidden segment register is up to date.
6304 *
6305 * @returns Hidden register reference.
6306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6307 * @param pSReg The segment register.
6308 */
6309IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6310{
6311#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6312 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6313 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6314#else
6315 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6316 NOREF(pVCpu);
6317#endif
6318 return pSReg;
6319}
6320
6321
6322/**
6323 * Gets a reference (pointer) to the specified segment register (the selector
6324 * value).
6325 *
6326 * @returns Pointer to the selector variable.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iSegReg The segment register.
6329 */
6330DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6331{
6332 Assert(iSegReg < X86_SREG_COUNT);
6333 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6334 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6335}
6336
6337
6338/**
6339 * Fetches the selector value of a segment register.
6340 *
6341 * @returns The selector value.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param iSegReg The segment register.
6344 */
6345DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6346{
6347 Assert(iSegReg < X86_SREG_COUNT);
6348 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6349 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6350}
6351
6352
6353/**
6354 * Fetches the base address value of a segment register.
6355 *
6356 * @returns The segment base address.
6357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6358 * @param iSegReg The segment register.
6359 */
6360DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6361{
6362 Assert(iSegReg < X86_SREG_COUNT);
6363 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6364 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6365}
6366
6367
6368/**
6369 * Gets a reference (pointer) to the specified general purpose register.
6370 *
6371 * @returns Register reference.
6372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6373 * @param iReg The general purpose register.
6374 */
6375DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6376{
6377 Assert(iReg < 16);
6378 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6379}
6380
6381
6382/**
6383 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6384 *
6385 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6386 *
6387 * @returns Register reference.
6388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6389 * @param iReg The register.
6390 */
6391DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6392{
6393 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6394 {
6395 Assert(iReg < 16);
6396 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6397 }
6398 /* high 8-bit register. */
6399 Assert(iReg < 8);
6400 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6401}
6402
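/*
 * Reader note: without a REX prefix, 8-bit register indices 4..7 encode
 * AH/CH/DH/BH, i.e. the high byte of registers 0..3; hence the 'iReg & 3'
 * and '.bHi' in the second path above.  With any REX prefix present they
 * encode SPL/BPL/SIL/DIL instead (and indices 8..15 reach R8B..R15B), so the
 * plain low-byte path is taken.
 */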
6403
6404/**
6405 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6406 *
6407 * @returns Register reference.
6408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6409 * @param iReg The register.
6410 */
6411DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6412{
6413 Assert(iReg < 16);
6414 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6415}
6416
6417
6418/**
6419 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6420 *
6421 * @returns Register reference.
6422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6423 * @param iReg The register.
6424 */
6425DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6426{
6427 Assert(iReg < 16);
6428 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6429}
6430
6431
6432/**
6433 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6434 *
6435 * @returns Register reference.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param iReg The register.
6438 */
6439DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6440{
6441    Assert(iReg < 16);
6442 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6443}
6444
6445
6446/**
6447 * Gets a reference (pointer) to the specified segment register's base address.
6448 *
6449 * @returns Segment register base address reference.
6450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6451 * @param iSegReg The segment selector.
6452 */
6453DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6454{
6455 Assert(iSegReg < X86_SREG_COUNT);
6456 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6457 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6458}
6459
6460
6461/**
6462 * Fetches the value of an 8-bit general purpose register.
6463 *
6464 * @returns The register value.
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 * @param iReg The register.
6467 */
6468DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6469{
6470 return *iemGRegRefU8(pVCpu, iReg);
6471}
6472
6473
6474/**
6475 * Fetches the value of a 16-bit general purpose register.
6476 *
6477 * @returns The register value.
6478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6479 * @param iReg The register.
6480 */
6481DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6482{
6483 Assert(iReg < 16);
6484 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6485}
6486
6487
6488/**
6489 * Fetches the value of a 32-bit general purpose register.
6490 *
6491 * @returns The register value.
6492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6493 * @param iReg The register.
6494 */
6495DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6496{
6497 Assert(iReg < 16);
6498 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6499}
6500
6501
6502/**
6503 * Fetches the value of a 64-bit general purpose register.
6504 *
6505 * @returns The register value.
6506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6507 * @param iReg The register.
6508 */
6509DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6510{
6511 Assert(iReg < 16);
6512 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6513}
6514
6515
6516/**
6517 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6518 *
6519 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6520 * segment limit.
6521 *
6522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6523 * @param offNextInstr The offset of the next instruction.
6524 */
6525IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6526{
6527 switch (pVCpu->iem.s.enmEffOpSize)
6528 {
6529 case IEMMODE_16BIT:
6530 {
6531 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6532 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6533 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6534 return iemRaiseGeneralProtectionFault0(pVCpu);
6535 pVCpu->cpum.GstCtx.rip = uNewIp;
6536 break;
6537 }
6538
6539 case IEMMODE_32BIT:
6540 {
6541 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6542 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6543
6544 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6545 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6546 return iemRaiseGeneralProtectionFault0(pVCpu);
6547 pVCpu->cpum.GstCtx.rip = uNewEip;
6548 break;
6549 }
6550
6551 case IEMMODE_64BIT:
6552 {
6553 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6554
6555 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6556 if (!IEM_IS_CANONICAL(uNewRip))
6557 return iemRaiseGeneralProtectionFault0(pVCpu);
6558 pVCpu->cpum.GstCtx.rip = uNewRip;
6559 break;
6560 }
6561
6562 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6563 }
6564
6565 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6566
6567#ifndef IEM_WITH_CODE_TLB
6568 /* Flush the prefetch buffer. */
6569 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6570#endif
6571
6572 return VINF_SUCCESS;
6573}
6574
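/*
 * Reader note on the IEM_IS_CANONICAL check in the 64-bit case above: with
 * 48-bit virtual addressing an address is canonical when bits 63:47 are all
 * identical.  For example, 0x00007fffffffffff and 0xffff800000000000 are
 * canonical, while 0x0000800000000000 is not and the jump raises \#GP(0).
 */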
6575
6576/**
6577 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6578 *
6579 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6580 * segment limit.
6581 *
6582 * @returns Strict VBox status code.
6583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6584 * @param offNextInstr The offset of the next instruction.
6585 */
6586IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6587{
6588 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6589
6590 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6591 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6592 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6593 return iemRaiseGeneralProtectionFault0(pVCpu);
6594    /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6595 pVCpu->cpum.GstCtx.rip = uNewIp;
6596 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6597
6598#ifndef IEM_WITH_CODE_TLB
6599 /* Flush the prefetch buffer. */
6600 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6601#endif
6602
6603 return VINF_SUCCESS;
6604}
6605
6606
6607/**
6608 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6609 *
6610 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6611 * segment limit.
6612 *
6613 * @returns Strict VBox status code.
6614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6615 * @param offNextInstr The offset of the next instruction.
6616 */
6617IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6618{
6619 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6620
6621 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6622 {
6623 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6624
6625 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6626 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6627 return iemRaiseGeneralProtectionFault0(pVCpu);
6628 pVCpu->cpum.GstCtx.rip = uNewEip;
6629 }
6630 else
6631 {
6632 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6633
6634 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6635 if (!IEM_IS_CANONICAL(uNewRip))
6636 return iemRaiseGeneralProtectionFault0(pVCpu);
6637 pVCpu->cpum.GstCtx.rip = uNewRip;
6638 }
6639 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6640
6641#ifndef IEM_WITH_CODE_TLB
6642 /* Flush the prefetch buffer. */
6643 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6644#endif
6645
6646 return VINF_SUCCESS;
6647}
6648
6649
6650/**
6651 * Performs a near jump to the specified address.
6652 *
6653 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6654 * segment limit.
6655 *
6656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6657 * @param uNewRip The new RIP value.
6658 */
6659IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6660{
6661 switch (pVCpu->iem.s.enmEffOpSize)
6662 {
6663 case IEMMODE_16BIT:
6664 {
6665 Assert(uNewRip <= UINT16_MAX);
6666 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6667 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6668 return iemRaiseGeneralProtectionFault0(pVCpu);
6669 /** @todo Test 16-bit jump in 64-bit mode. */
6670 pVCpu->cpum.GstCtx.rip = uNewRip;
6671 break;
6672 }
6673
6674 case IEMMODE_32BIT:
6675 {
6676 Assert(uNewRip <= UINT32_MAX);
6677 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6678 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6679
6680 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6681 return iemRaiseGeneralProtectionFault0(pVCpu);
6682 pVCpu->cpum.GstCtx.rip = uNewRip;
6683 break;
6684 }
6685
6686 case IEMMODE_64BIT:
6687 {
6688 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6689
6690 if (!IEM_IS_CANONICAL(uNewRip))
6691 return iemRaiseGeneralProtectionFault0(pVCpu);
6692 pVCpu->cpum.GstCtx.rip = uNewRip;
6693 break;
6694 }
6695
6696 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6697 }
6698
6699 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6700
6701#ifndef IEM_WITH_CODE_TLB
6702 /* Flush the prefetch buffer. */
6703 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6704#endif
6705
6706 return VINF_SUCCESS;
6707}
6708
6709
6710/**
6711 * Gets the address of the top of the stack.
6712 *
6713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6714 */
6715DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6716{
6717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6718 return pVCpu->cpum.GstCtx.rsp;
6719 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6720 return pVCpu->cpum.GstCtx.esp;
6721 return pVCpu->cpum.GstCtx.sp;
6722}
6723
6724
6725/**
6726 * Updates the RIP/EIP/IP to point to the next instruction.
6727 *
6728 * This function leaves the EFLAGS.RF flag alone.
6729 *
6730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6731 * @param cbInstr The number of bytes to add.
6732 */
6733IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6734{
6735 switch (pVCpu->iem.s.enmCpuMode)
6736 {
6737 case IEMMODE_16BIT:
6738 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6739 pVCpu->cpum.GstCtx.eip += cbInstr;
6740 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6741 break;
6742
6743 case IEMMODE_32BIT:
6744 pVCpu->cpum.GstCtx.eip += cbInstr;
6745 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6746 break;
6747
6748 case IEMMODE_64BIT:
6749 pVCpu->cpum.GstCtx.rip += cbInstr;
6750 break;
6751 default: AssertFailed();
6752 }
6753}
6754
6755
6756#if 0
6757/**
6758 * Updates the RIP/EIP/IP to point to the next instruction.
6759 *
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 */
6762IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6763{
6764 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6765}
6766#endif
6767
6768
6769
6770/**
6771 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6772 *
6773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6774 * @param cbInstr The number of bytes to add.
6775 */
6776IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6777{
6778 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6779
6780 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6781#if ARCH_BITS >= 64
6782 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6783 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6784 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6785#else
6786 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6787 pVCpu->cpum.GstCtx.rip += cbInstr;
6788 else
6789 pVCpu->cpum.GstCtx.eip += cbInstr;
6790#endif
6791}
6792
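/*
 * Reader note: the mask table above is a branch-free way of clearing the upper
 * 32 bits of the result outside 64-bit mode (mirroring how writes to EIP
 * zero-extend), while keeping the full 64-bit value in long mode.
 */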
6793
6794/**
6795 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6796 *
6797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6798 */
6799IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6800{
6801 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6802}
6803
6804
6805/**
6806 * Adds to the stack pointer.
6807 *
6808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6809 * @param cbToAdd The number of bytes to add (8-bit!).
6810 */
6811DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6812{
6813 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6814 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6815 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6816 pVCpu->cpum.GstCtx.esp += cbToAdd;
6817 else
6818 pVCpu->cpum.GstCtx.sp += cbToAdd;
6819}
6820
6821
6822/**
6823 * Subtracts from the stack pointer.
6824 *
6825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6826 * @param cbToSub The number of bytes to subtract (8-bit!).
6827 */
6828DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6829{
6830 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6831 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6832 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6833 pVCpu->cpum.GstCtx.esp -= cbToSub;
6834 else
6835 pVCpu->cpum.GstCtx.sp -= cbToSub;
6836}
6837
6838
6839/**
6840 * Adds to the temporary stack pointer.
6841 *
6842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6843 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6844 * @param cbToAdd The number of bytes to add (16-bit).
6845 */
6846DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6847{
6848 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6849 pTmpRsp->u += cbToAdd;
6850 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6851 pTmpRsp->DWords.dw0 += cbToAdd;
6852 else
6853 pTmpRsp->Words.w0 += cbToAdd;
6854}
6855
6856
6857/**
6858 * Subtracts from the temporary stack pointer.
6859 *
6860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6861 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6862 * @param cbToSub The number of bytes to subtract.
6863 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6864 *          expecting that.
6865 */
6866DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6867{
6868 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6869 pTmpRsp->u -= cbToSub;
6870 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6871 pTmpRsp->DWords.dw0 -= cbToSub;
6872 else
6873 pTmpRsp->Words.w0 -= cbToSub;
6874}
6875
6876
6877/**
6878 * Calculates the effective stack address for a push of the specified size as
6879 * well as the new RSP value (upper bits may be masked).
6880 *
6881 * @returns Effective stack address for the push.
6882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6883 * @param cbItem The size of the stack item to push.
6884 * @param puNewRsp Where to return the new RSP value.
6885 */
6886DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6887{
6888 RTUINT64U uTmpRsp;
6889 RTGCPTR GCPtrTop;
6890 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6891
6892 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6893 GCPtrTop = uTmpRsp.u -= cbItem;
6894 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6895 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6896 else
6897 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6898 *puNewRsp = uTmpRsp.u;
6899 return GCPtrTop;
6900}
6901
6902
6903/**
6904 * Gets the current stack pointer and calculates the value after a pop of the
6905 * specified size.
6906 *
6907 * @returns Current stack pointer.
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 * @param cbItem The size of the stack item to pop.
6910 * @param puNewRsp Where to return the new RSP value.
6911 */
6912DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6913{
6914 RTUINT64U uTmpRsp;
6915 RTGCPTR GCPtrTop;
6916 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6917
6918 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6919 {
6920 GCPtrTop = uTmpRsp.u;
6921 uTmpRsp.u += cbItem;
6922 }
6923 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6924 {
6925 GCPtrTop = uTmpRsp.DWords.dw0;
6926 uTmpRsp.DWords.dw0 += cbItem;
6927 }
6928 else
6929 {
6930 GCPtrTop = uTmpRsp.Words.w0;
6931 uTmpRsp.Words.w0 += cbItem;
6932 }
6933 *puNewRsp = uTmpRsp.u;
6934 return GCPtrTop;
6935}
6936
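/*
 * Usage sketch (illustration only): the helpers above return the new RSP value
 * separately instead of writing it back, so callers can commit it only after
 * the guest memory access succeeded.  ExampleStoreU32() below is a made-up
 * stand-in for the real stack store helpers elsewhere in IEM.
 */
#if 0 /* example sketch, never compiled */
static VBOXSTRICTRC iemExamplePushU32(PVMCPU pVCpu, uint32_t u32Value)
{
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(u32Value), &uNewRsp);
    VBOXSTRICTRC rcStrict = ExampleStoreU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value); /* hypothetical store */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp; /* Commit the stack pointer only on success. */
    return rcStrict;
}
#endif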
6937
6938/**
6939 * Calculates the effective stack address for a push of the specified size as
6940 * well as the new temporary RSP value (upper bits may be masked).
6941 *
6942 * @returns Effective stack address for the push.
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 * @param pTmpRsp The temporary stack pointer. This is updated.
6945 * @param cbItem The size of the stack item to push.
6946 */
6947DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6948{
6949 RTGCPTR GCPtrTop;
6950
6951 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6952 GCPtrTop = pTmpRsp->u -= cbItem;
6953 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6954 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6955 else
6956 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6957 return GCPtrTop;
6958}
6959
6960
6961/**
6962 * Gets the effective stack address for a pop of the specified size and
6963 * calculates and updates the temporary RSP.
6964 *
6965 * @returns Current stack pointer.
6966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6967 * @param pTmpRsp The temporary stack pointer. This is updated.
6968 * @param cbItem The size of the stack item to pop.
6969 */
6970DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6971{
6972 RTGCPTR GCPtrTop;
6973 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6974 {
6975 GCPtrTop = pTmpRsp->u;
6976 pTmpRsp->u += cbItem;
6977 }
6978 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6979 {
6980 GCPtrTop = pTmpRsp->DWords.dw0;
6981 pTmpRsp->DWords.dw0 += cbItem;
6982 }
6983 else
6984 {
6985 GCPtrTop = pTmpRsp->Words.w0;
6986 pTmpRsp->Words.w0 += cbItem;
6987 }
6988 return GCPtrTop;
6989}
6990
6991/** @} */
6992
6993
6994/** @name FPU access and helpers.
6995 *
6996 * @{
6997 */
6998
6999
7000/**
7001 * Hook for preparing to use the host FPU.
7002 *
7003 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7004 *
7005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7006 */
7007DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
7008{
7009#ifdef IN_RING3
7010 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7011#else
7012 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7013#endif
7014 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7015}
7016
7017
7018/**
7019 * Hook for preparing to use the host FPU for SSE.
7020 *
7021 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7022 *
7023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7024 */
7025DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
7026{
7027 iemFpuPrepareUsage(pVCpu);
7028}
7029
7030
7031/**
7032 * Hook for preparing to use the host FPU for AVX.
7033 *
7034 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7035 *
7036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7037 */
7038DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7039{
7040 iemFpuPrepareUsage(pVCpu);
7041}
7042
7043
7044/**
7045 * Hook for actualizing the guest FPU state before the interpreter reads it.
7046 *
7047 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7048 *
7049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7050 */
7051DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7052{
7053#ifdef IN_RING3
7054 NOREF(pVCpu);
7055#else
7056 CPUMRZFpuStateActualizeForRead(pVCpu);
7057#endif
7058 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7059}
7060
7061
7062/**
7063 * Hook for actualizing the guest FPU state before the interpreter changes it.
7064 *
7065 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7066 *
7067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7068 */
7069DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7070{
7071#ifdef IN_RING3
7072 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7073#else
7074 CPUMRZFpuStateActualizeForChange(pVCpu);
7075#endif
7076 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7077}
7078
7079
7080/**
7081 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7082 * only.
7083 *
7084 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7085 *
7086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7087 */
7088DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7089{
7090#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7091 NOREF(pVCpu);
7092#else
7093 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7094#endif
7095 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7096}
7097
7098
7099/**
7100 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7101 * read+write.
7102 *
7103 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7104 *
7105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7106 */
7107DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7108{
7109#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7110 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7111#else
7112 CPUMRZFpuStateActualizeForChange(pVCpu);
7113#endif
7114 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7115}
7116
7117
7118/**
7119 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7120 * only.
7121 *
7122 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7123 *
7124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7125 */
7126DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7127{
7128#ifdef IN_RING3
7129 NOREF(pVCpu);
7130#else
7131 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7132#endif
7133 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7134}
7135
7136
7137/**
7138 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7139 * read+write.
7140 *
7141 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7142 *
7143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7144 */
7145DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7146{
7147#ifdef IN_RING3
7148 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7149#else
7150 CPUMRZFpuStateActualizeForChange(pVCpu);
7151#endif
7152 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7153}
7154
7155
7156/**
7157 * Stores a QNaN value into a FPU register.
7158 *
7159 * @param pReg Pointer to the register.
7160 */
7161DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7162{
7163 pReg->au32[0] = UINT32_C(0x00000000);
7164 pReg->au32[1] = UINT32_C(0xc0000000);
7165 pReg->au16[4] = UINT16_C(0xffff);
7166}
7167
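/*
 * Reader note: the bit pattern written above is the x87 "QNaN floating-point
 * indefinite" (sign = 1, exponent = 0x7fff, significand = 0xc000000000000000),
 * i.e. the value real hardware stores for masked invalid-operation responses.
 */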
7168
7169/**
7170 * Updates the FOP, FPU.CS and FPUIP registers.
7171 *
7172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7173 * @param pFpuCtx The FPU context.
7174 */
7175DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7176{
7177 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7178 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7179    /** @todo x87.CS and FPUIP need to be kept separately. */
7180 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7181 {
7182 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7183 * happens in real mode here based on the fnsave and fnstenv images. */
7184 pFpuCtx->CS = 0;
7185 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7186 }
7187 else
7188 {
7189 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7190 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7191 }
7192}
7193
7194
7195/**
7196 * Updates the x87.DS and FPUDP registers.
7197 *
7198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7199 * @param pFpuCtx The FPU context.
7200 * @param iEffSeg The effective segment register.
7201 * @param GCPtrEff The effective address relative to @a iEffSeg.
7202 */
7203DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7204{
7205 RTSEL sel;
7206 switch (iEffSeg)
7207 {
7208 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7209 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7210 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7211 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7212 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7213 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7214 default:
7215 AssertMsgFailed(("%d\n", iEffSeg));
7216 sel = pVCpu->cpum.GstCtx.ds.Sel;
7217 }
7218    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7219 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7220 {
7221 pFpuCtx->DS = 0;
7222 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7223 }
7224 else
7225 {
7226 pFpuCtx->DS = sel;
7227 pFpuCtx->FPUDP = GCPtrEff;
7228 }
7229}
7230
7231
7232/**
7233 * Rotates the stack registers in the push direction.
7234 *
7235 * @param pFpuCtx The FPU context.
7236 * @remarks This is a complete waste of time, but fxsave stores the registers in
7237 * stack order.
7238 */
7239DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7240{
7241 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7242 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7243 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7244 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7245 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7246 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7247 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7248 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7249 pFpuCtx->aRegs[0].r80 = r80Tmp;
7250}
7251
7252
7253/**
7254 * Rotates the stack registers in the pop direction.
7255 *
7256 * @param pFpuCtx The FPU context.
7257 * @remarks This is a complete waste of time, but fxsave stores the registers in
7258 * stack order.
7259 */
7260DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7261{
7262 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7263 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7264 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7265 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7266 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7267 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7268 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7269 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7270 pFpuCtx->aRegs[7].r80 = r80Tmp;
7271}
7272
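/*
 * Reader note: in the FXSAVE image aRegs[] is kept in *stack* order, i.e.
 * aRegs[0] is always ST(0).  That is why pushing and popping physically rotate
 * the array above, while the FTW bitmap (indexed by physical register number)
 * is addressed as (TOP + iStReg) & X86_FSW_TOP_SMASK elsewhere, e.g. in
 * iemFpuStoreResultOnly, and aRegs[] is indexed by iStReg directly.
 */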
7273
7274/**
7275 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7276 * exception prevents it.
7277 *
7278 * @param pResult The FPU operation result to push.
7279 * @param pFpuCtx The FPU context.
7280 */
7281IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7282{
7283 /* Update FSW and bail if there are pending exceptions afterwards. */
7284 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7285 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7286 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7287 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7288 {
7289 pFpuCtx->FSW = fFsw;
7290 return;
7291 }
7292
7293 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7294 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7295 {
7296 /* All is fine, push the actual value. */
7297 pFpuCtx->FTW |= RT_BIT(iNewTop);
7298 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7299 }
7300 else if (pFpuCtx->FCW & X86_FCW_IM)
7301 {
7302 /* Masked stack overflow, push QNaN. */
7303 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7304 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7305 }
7306 else
7307 {
7308 /* Raise stack overflow, don't push anything. */
7309 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7310 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7311 return;
7312 }
7313
7314 fFsw &= ~X86_FSW_TOP_MASK;
7315 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7316 pFpuCtx->FSW = fFsw;
7317
7318 iemFpuRotateStackPush(pFpuCtx);
7319}
7320
7321
7322/**
7323 * Stores a result in a FPU register and updates the FSW and FTW.
7324 *
7325 * @param pFpuCtx The FPU context.
7326 * @param pResult The result to store.
7327 * @param iStReg Which FPU register to store it in.
7328 */
7329IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7330{
7331 Assert(iStReg < 8);
7332 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7333 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7334 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7335 pFpuCtx->FTW |= RT_BIT(iReg);
7336 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7337}
7338
7339
7340/**
7341 * Only updates the FPU status word (FSW) with the result of the current
7342 * instruction.
7343 *
7344 * @param pFpuCtx The FPU context.
7345 * @param u16FSW The FSW output of the current instruction.
7346 */
7347IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7348{
7349 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7350 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7351}
7352
7353
7354/**
7355 * Pops one item off the FPU stack if no pending exception prevents it.
7356 *
7357 * @param pFpuCtx The FPU context.
7358 */
7359IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7360{
7361 /* Check pending exceptions. */
7362 uint16_t uFSW = pFpuCtx->FSW;
7363 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7364 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7365 return;
7366
7367    /* TOP++ (pop). */
7368 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7369 uFSW &= ~X86_FSW_TOP_MASK;
7370 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7371 pFpuCtx->FSW = uFSW;
7372
7373 /* Mark the previous ST0 as empty. */
7374 iOldTop >>= X86_FSW_TOP_SHIFT;
7375 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7376
7377 /* Rotate the registers. */
7378 iemFpuRotateStackPop(pFpuCtx);
7379}
7380
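/*
 * Worked example of the modulo-8 TOP arithmetic used by the push and pop
 * helpers above: TOP is a 3-bit field, so adding 7 modulo 8 is a decrement
 * (push), and adding 9 modulo 8, as the pop path does, is an increment.
 * With TOP = 0 a push wraps the new top to 7; with TOP = 7 a pop wraps it
 * back to 0.
 */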
7381
7382/**
7383 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param pResult The FPU operation result to push.
7387 */
7388IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7389{
7390 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7392 iemFpuMaybePushResult(pResult, pFpuCtx);
7393}
7394
7395
7396/**
7397 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7398 * and sets FPUDP and FPUDS.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 * @param pResult The FPU operation result to push.
7402 * @param iEffSeg The effective segment register.
7403 * @param GCPtrEff The effective address relative to @a iEffSeg.
7404 */
7405IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7409 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7410 iemFpuMaybePushResult(pResult, pFpuCtx);
7411}
7412
7413
7414/**
7415 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7416 * unless a pending exception prevents it.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param pResult The FPU operation result to store and push.
7420 */
7421IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7422{
7423 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7424 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7425
7426 /* Update FSW and bail if there are pending exceptions afterwards. */
7427 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7428 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7429 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7430 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7431 {
7432 pFpuCtx->FSW = fFsw;
7433 return;
7434 }
7435
7436 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7437 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7438 {
7439 /* All is fine, push the actual value. */
7440 pFpuCtx->FTW |= RT_BIT(iNewTop);
7441 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7442 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7443 }
7444 else if (pFpuCtx->FCW & X86_FCW_IM)
7445 {
7446 /* Masked stack overflow, push QNaN. */
7447 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7448 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7449 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7450 }
7451 else
7452 {
7453 /* Raise stack overflow, don't push anything. */
7454 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7455 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7456 return;
7457 }
7458
7459 fFsw &= ~X86_FSW_TOP_MASK;
7460 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7461 pFpuCtx->FSW = fFsw;
7462
7463 iemFpuRotateStackPush(pFpuCtx);
7464}
7465
7466
7467/**
7468 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7469 * FOP.
7470 *
7471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7472 * @param pResult The result to store.
7473 * @param iStReg Which FPU register to store it in.
7474 */
7475IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7476{
7477 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7478 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7479 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7480}
7481
7482
7483/**
7484 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7485 * FOP, and then pops the stack.
7486 *
7487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7488 * @param pResult The result to store.
7489 * @param iStReg Which FPU register to store it in.
7490 */
7491IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7492{
7493 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7494 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7495 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7496 iemFpuMaybePopOne(pFpuCtx);
7497}
7498
7499
7500/**
7501 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7502 * FPUDP, and FPUDS.
7503 *
7504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7505 * @param pResult The result to store.
7506 * @param iStReg Which FPU register to store it in.
7507 * @param iEffSeg The effective memory operand selector register.
7508 * @param GCPtrEff The effective memory operand offset.
7509 */
7510IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7511 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7512{
7513 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7514 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7515 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7516 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7517}
7518
7519
7520/**
7521 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7522 * FPUDP, and FPUDS, and then pops the stack.
7523 *
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param pResult The result to store.
7526 * @param iStReg Which FPU register to store it in.
7527 * @param iEffSeg The effective memory operand selector register.
7528 * @param GCPtrEff The effective memory operand offset.
7529 */
7530IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7531 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7532{
7533 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7534 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7535 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7536 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7537 iemFpuMaybePopOne(pFpuCtx);
7538}
7539
7540
7541/**
7542 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7543 *
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 */
7546IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7547{
7548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7549 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7550}
7551
7552
7553/**
7554 * Marks the specified stack register as free (for FFREE).
7555 *
7556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7557 * @param iStReg The register to free.
7558 */
7559IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7560{
7561 Assert(iStReg < 8);
7562 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7563 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7564 pFpuCtx->FTW &= ~RT_BIT(iReg);
7565}
7566
7567
7568/**
7569 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 */
7573IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7574{
7575 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7576 uint16_t uFsw = pFpuCtx->FSW;
7577 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7578 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7579 uFsw &= ~X86_FSW_TOP_MASK;
7580 uFsw |= uTop;
7581 pFpuCtx->FSW = uFsw;
7582}
7583
7584
7585/**
7586 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7587 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 */
7590IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7591{
7592 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7593 uint16_t uFsw = pFpuCtx->FSW;
7594 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7595 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7596 uFsw &= ~X86_FSW_TOP_MASK;
7597 uFsw |= uTop;
7598 pFpuCtx->FSW = uFsw;
7599}
7600
7601
7602/**
7603 * Updates the FSW, FOP, FPUIP, and FPUCS.
7604 *
7605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7606 * @param u16FSW The FSW from the current instruction.
7607 */
7608IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7609{
7610 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7611 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7612 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7613}
7614
7615
7616/**
7617 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7618 *
7619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7620 * @param u16FSW The FSW from the current instruction.
7621 */
7622IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7623{
7624 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7626 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7627 iemFpuMaybePopOne(pFpuCtx);
7628}
7629
7630
7631/**
7632 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7633 *
7634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7635 * @param u16FSW The FSW from the current instruction.
7636 * @param iEffSeg The effective memory operand selector register.
7637 * @param GCPtrEff The effective memory operand offset.
7638 */
7639IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7640{
7641 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7642 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7643 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7644 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7645}
7646
7647
7648/**
7649 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7650 *
7651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7652 * @param u16FSW The FSW from the current instruction.
7653 */
7654IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7655{
7656 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7657 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7658 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7659 iemFpuMaybePopOne(pFpuCtx);
7660 iemFpuMaybePopOne(pFpuCtx);
7661}
7662
7663
7664/**
7665 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7666 *
7667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7668 * @param u16FSW The FSW from the current instruction.
7669 * @param iEffSeg The effective memory operand selector register.
7670 * @param GCPtrEff The effective memory operand offset.
7671 */
7672IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7673{
7674 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7675 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7676 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7677 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7678 iemFpuMaybePopOne(pFpuCtx);
7679}
7680
7681
7682/**
7683 * Worker routine for raising an FPU stack underflow exception.
7684 *
7685 * @param pFpuCtx The FPU context.
7686 * @param iStReg The stack register being accessed.
7687 */
7688IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7689{
7690 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7691 if (pFpuCtx->FCW & X86_FCW_IM)
7692 {
7693 /* Masked underflow. */
7694 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7695 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7696 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7697 if (iStReg != UINT8_MAX)
7698 {
7699 pFpuCtx->FTW |= RT_BIT(iReg);
7700 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7701 }
7702 }
7703 else
7704 {
7705 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7706 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7707 }
7708}
7709
7710
7711/**
7712 * Raises a FPU stack underflow exception.
7713 *
7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7715 * @param iStReg The destination register that should be loaded
7716 * with QNaN if \#IS is not masked. Specify
7717 * UINT8_MAX if none (like for fcom).
7718 */
7719DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7720{
7721 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7722 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7723 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7724}
7725
7726
7727DECL_NO_INLINE(IEM_STATIC, void)
7728iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7729{
7730 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7731 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7732 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7733 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7734}
7735
7736
7737DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7738{
7739 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7740 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7741 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7742 iemFpuMaybePopOne(pFpuCtx);
7743}
7744
7745
7746DECL_NO_INLINE(IEM_STATIC, void)
7747iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7748{
7749 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7750 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7751 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7752 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7753 iemFpuMaybePopOne(pFpuCtx);
7754}
7755
7756
7757DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7758{
7759 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7760 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7761 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7762 iemFpuMaybePopOne(pFpuCtx);
7763 iemFpuMaybePopOne(pFpuCtx);
7764}
7765
7766
7767DECL_NO_INLINE(IEM_STATIC, void)
7768iemFpuStackPushUnderflow(PVMCPU pVCpu)
7769{
7770 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7771 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7772
7773 if (pFpuCtx->FCW & X86_FCW_IM)
7774 {
7775 /* Masked underflow - Push QNaN. */
7776 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7777 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7778 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7779 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7780 pFpuCtx->FTW |= RT_BIT(iNewTop);
7781 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7782 iemFpuRotateStackPush(pFpuCtx);
7783 }
7784 else
7785 {
7786 /* Exception pending - don't change TOP or the register stack. */
7787 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7788 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7789 }
7790}
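
/*
 * Illustrative note (sketch): (TOP + 7) & X86_FSW_TOP_SMASK is a modulo-8 decrement
 * of TOP, e.g. TOP=0 wraps to 7 and TOP=3 becomes 2, which is how a push allocates
 * the next lower physical register.  The QNaN written to aRegs[7] above ends up as
 * the new ST(0) once iemFpuRotateStackPush has rotated the stack-relative register
 * view.
 */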
7791
7792
7793DECL_NO_INLINE(IEM_STATIC, void)
7794iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7795{
7796 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7797 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7798
7799 if (pFpuCtx->FCW & X86_FCW_IM)
7800 {
7801 /* Masked underflow - Push QNaN. */
7802 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7803 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7804 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7805 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7806 pFpuCtx->FTW |= RT_BIT(iNewTop);
7807 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7808 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7809 iemFpuRotateStackPush(pFpuCtx);
7810 }
7811 else
7812 {
7813 /* Exception pending - don't change TOP or the register stack. */
7814 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7815 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7816 }
7817}
7818
7819
7820/**
7821 * Worker routine for raising an FPU stack overflow exception on a push.
7822 *
7823 * @param pFpuCtx The FPU context.
7824 */
7825IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7826{
7827 if (pFpuCtx->FCW & X86_FCW_IM)
7828 {
7829 /* Masked overflow. */
7830 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7831 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7832 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7833 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7834 pFpuCtx->FTW |= RT_BIT(iNewTop);
7835 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7836 iemFpuRotateStackPush(pFpuCtx);
7837 }
7838 else
7839 {
7840 /* Exception pending - don't change TOP or the register stack. */
7841 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7842 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7843 }
7844}
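
/*
 * Note (illustrative): unlike the underflow helpers above, C1 is set here; in a #IS
 * report C1=1 indicates stack overflow and C1=0 indicates stack underflow.
 */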
7845
7846
7847/**
7848 * Raises an FPU stack overflow exception on a push.
7849 *
7850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7851 */
7852DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7853{
7854 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7855 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7856 iemFpuStackPushOverflowOnly(pFpuCtx);
7857}
7858
7859
7860/**
7861 * Raises an FPU stack overflow exception on a push with a memory operand.
7862 *
7863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7864 * @param iEffSeg The effective memory operand selector register.
7865 * @param GCPtrEff The effective memory operand offset.
7866 */
7867DECL_NO_INLINE(IEM_STATIC, void)
7868iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7869{
7870 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7871 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7872 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7873 iemFpuStackPushOverflowOnly(pFpuCtx);
7874}
7875
7876
7877IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7878{
7879 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7880 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7881 if (pFpuCtx->FTW & RT_BIT(iReg))
7882 return VINF_SUCCESS;
7883 return VERR_NOT_FOUND;
7884}
7885
7886
7887IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7888{
7889 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7890 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7891 if (pFpuCtx->FTW & RT_BIT(iReg))
7892 {
7893 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7894 return VINF_SUCCESS;
7895 }
7896 return VERR_NOT_FOUND;
7897}
7898
7899
7900IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7901 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7902{
7903 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7904 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7905 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7906 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7907 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7908 {
7909 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7910 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7911 return VINF_SUCCESS;
7912 }
7913 return VERR_NOT_FOUND;
7914}
7915
7916
7917IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7918{
7919 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7920 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7921 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7922 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7923 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7924 {
7925 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7926 return VINF_SUCCESS;
7927 }
7928 return VERR_NOT_FOUND;
7929}
7930
7931
7932/**
7933 * Updates the FPU exception status after FCW is changed.
7934 *
7935 * @param pFpuCtx The FPU context.
7936 */
7937IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7938{
7939 uint16_t u16Fsw = pFpuCtx->FSW;
7940 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7941 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7942 else
7943 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7944 pFpuCtx->FSW = u16Fsw;
7945}
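
/*
 * Illustrative example (sketch): an exception bit that is pending in FSW but whose
 * mask bit is clear in FCW forces ES and B.  E.g. FSW.IE=1 with FCW.IM=0 sets ES+B;
 * once FCW.IM is set again (and no other unmasked exception is pending) the same
 * recalculation clears ES and B.
 */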
7946
7947
7948/**
7949 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7950 *
7951 * @returns The full FTW.
7952 * @param pFpuCtx The FPU context.
7953 */
7954IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7955{
7956 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7957 uint16_t u16Ftw = 0;
7958 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7959 for (unsigned iSt = 0; iSt < 8; iSt++)
7960 {
7961 unsigned const iReg = (iSt + iTop) & 7;
7962 if (!(u8Ftw & RT_BIT(iReg)))
7963 u16Ftw |= 3 << (iReg * 2); /* empty */
7964 else
7965 {
7966 uint16_t uTag;
7967 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7968 if (pr80Reg->s.uExponent == 0x7fff)
7969 uTag = 2; /* Exponent is all 1's => Special. */
7970 else if (pr80Reg->s.uExponent == 0x0000)
7971 {
7972 if (pr80Reg->s.u64Mantissa == 0x0000)
7973 uTag = 1; /* All bits are zero => Zero. */
7974 else
7975 uTag = 2; /* Must be special. */
7976 }
7977 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7978 uTag = 0; /* Valid. */
7979 else
7980 uTag = 2; /* Must be special. */
7981
7982 u16Ftw |= uTag << (iReg * 2);
7983 }
7984 }
7985
7986 return u16Ftw;
7987}
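
/*
 * Illustrative examples (sketch) of the 2-bit tag values produced above
 * (00=valid, 01=zero, 10=special, 11=empty):
 *   - 1.0 (exponent 0x3fff, mantissa MSB/J-bit set)     -> 00 (valid)
 *   - +0.0 (exponent 0, mantissa 0)                     -> 01 (zero)
 *   - infinities, NaNs (exponent 0x7fff) and denormals  -> 10 (special)
 *   - registers with the FTW bit clear                  -> 11 (empty)
 */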
7988
7989
7990/**
7991 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7992 *
7993 * @returns The compressed FTW.
7994 * @param u16FullFtw The full FTW to convert.
7995 */
7996IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7997{
7998 uint8_t u8Ftw = 0;
7999 for (unsigned i = 0; i < 8; i++)
8000 {
8001 if ((u16FullFtw & 3) != 3 /*empty*/)
8002 u8Ftw |= RT_BIT(i);
8003 u16FullFtw >>= 2;
8004 }
8005
8006 return u8Ftw;
8007}
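
/*
 * Illustrative sketch (assumption: not part of the build; example values hand
 * checked against the loop above): each pair of tag bits collapses to a single
 * valid/empty bit per physical register.
 */
#if 0
static void iemFpuCompressFtwExample(void)
{
    Assert(iemFpuCompressFtw(UINT16_C(0xffff)) == 0x00); /* all empty */
    Assert(iemFpuCompressFtw(UINT16_C(0x0000)) == 0xff); /* all valid */
    Assert(iemFpuCompressFtw(UINT16_C(0xfffc)) == 0x01); /* only physical reg 0 in use */
}
#endif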
8008
8009/** @} */
8010
8011
8012/** @name Memory access.
8013 *
8014 * @{
8015 */
8016
8017
8018/**
8019 * Updates the IEMCPU::cbWritten counter if applicable.
8020 *
8021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8022 * @param fAccess The access being accounted for.
8023 * @param cbMem The access size.
8024 */
8025DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
8026{
8027 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8028 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8029 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8030}
8031
8032
8033/**
8034 * Checks if the given segment can be written to, raising the appropriate
8035 * exception if not.
8036 *
8037 * @returns VBox strict status code.
8038 *
8039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8040 * @param pHid Pointer to the hidden register.
8041 * @param iSegReg The register number.
8042 * @param pu64BaseAddr Where to return the base address to use for the
8043 * segment. (In 64-bit code it may differ from the
8044 * base in the hidden segment.)
8045 */
8046IEM_STATIC VBOXSTRICTRC
8047iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8048{
8049 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8050
8051 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8052 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8053 else
8054 {
8055 if (!pHid->Attr.n.u1Present)
8056 {
8057 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8058 AssertRelease(uSel == 0);
8059 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8060 return iemRaiseGeneralProtectionFault0(pVCpu);
8061 }
8062
8063 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8064 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8065 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8066 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8067 *pu64BaseAddr = pHid->u64Base;
8068 }
8069 return VINF_SUCCESS;
8070}
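
/*
 * Illustrative note: in 64-bit mode the base returned above is forced to zero for
 * ES/CS/SS/DS and only FS/GS contribute their base address, which is why callers
 * can unconditionally add *pu64BaseAddr to the segment offset.  In legacy modes the
 * base always comes from the hidden part of the segment register.
 */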
8071
8072
8073/**
8074 * Checks if the given segment can be read from, raising the appropriate
8075 * exception if not.
8076 *
8077 * @returns VBox strict status code.
8078 *
8079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8080 * @param pHid Pointer to the hidden register.
8081 * @param iSegReg The register number.
8082 * @param pu64BaseAddr Where to return the base address to use for the
8083 * segment. (In 64-bit code it may differ from the
8084 * base in the hidden segment.)
8085 */
8086IEM_STATIC VBOXSTRICTRC
8087iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8088{
8089 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8090
8091 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8092 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8093 else
8094 {
8095 if (!pHid->Attr.n.u1Present)
8096 {
8097 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8098 AssertRelease(uSel == 0);
8099 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8100 return iemRaiseGeneralProtectionFault0(pVCpu);
8101 }
8102
8103 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8104 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8105 *pu64BaseAddr = pHid->u64Base;
8106 }
8107 return VINF_SUCCESS;
8108}
8109
8110
8111/**
8112 * Applies the segment limit, base and attributes.
8113 *
8114 * This may raise a \#GP or \#SS.
8115 *
8116 * @returns VBox strict status code.
8117 *
8118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8119 * @param fAccess The kind of access which is being performed.
8120 * @param iSegReg The index of the segment register to apply.
8121 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8122 * TSS, ++).
8123 * @param cbMem The access size.
8124 * @param pGCPtrMem Pointer to the guest memory address to apply
8125 * segmentation to. Input and output parameter.
8126 */
8127IEM_STATIC VBOXSTRICTRC
8128iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8129{
8130 if (iSegReg == UINT8_MAX)
8131 return VINF_SUCCESS;
8132
8133 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8134 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8135 switch (pVCpu->iem.s.enmCpuMode)
8136 {
8137 case IEMMODE_16BIT:
8138 case IEMMODE_32BIT:
8139 {
8140 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8141 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8142
8143 if ( pSel->Attr.n.u1Present
8144 && !pSel->Attr.n.u1Unusable)
8145 {
8146 Assert(pSel->Attr.n.u1DescType);
8147 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8148 {
8149 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8150 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8151 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8152
8153 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8154 {
8155 /** @todo CPL check. */
8156 }
8157
8158 /*
8159 * There are two kinds of data selectors, normal and expand down.
8160 */
8161 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8162 {
8163 if ( GCPtrFirst32 > pSel->u32Limit
8164 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8165 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8166 }
8167 else
8168 {
8169 /*
8170 * The upper boundary is defined by the B bit, not the G bit!
8171 */
8172 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8173 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8174 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8175 }
8176 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8177 }
8178 else
8179 {
8180
8181 /*
8182 * A code selector can usually be used to read through; writing is
8183 * only permitted in real and V8086 mode.
8184 */
8185 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8186 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8187 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8188 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8189 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8190
8191 if ( GCPtrFirst32 > pSel->u32Limit
8192 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8193 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8194
8195 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8196 {
8197 /** @todo CPL check. */
8198 }
8199
8200 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8201 }
8202 }
8203 else
8204 return iemRaiseGeneralProtectionFault0(pVCpu);
8205 return VINF_SUCCESS;
8206 }
8207
8208 case IEMMODE_64BIT:
8209 {
8210 RTGCPTR GCPtrMem = *pGCPtrMem;
8211 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8212 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8213
8214 Assert(cbMem >= 1);
8215 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8216 return VINF_SUCCESS;
8217 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8218 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8219 return iemRaiseGeneralProtectionFault0(pVCpu);
8220 }
8221
8222 default:
8223 AssertFailedReturn(VERR_IEM_IPE_7);
8224 }
8225}
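
/*
 * Illustrative sketch (assumption: standalone helper, not part of the build): the
 * 16/32-bit data segment limit rules from the switch above reduced to a boolean.
 * For an expand-up segment the valid range is [0, u32Limit]; for an expand-down one
 * it is (u32Limit, 0xffff] or (u32Limit, 0xffffffff] depending on the D/B bit.
 */
#if 0
static bool iemSketchIsWithinDataSegLimit(uint32_t GCPtrFirst32, uint32_t GCPtrLast32,
                                          uint32_t u32Limit, bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return GCPtrFirst32 <= u32Limit && GCPtrLast32 <= u32Limit;
    return GCPtrFirst32 > u32Limit
        && GCPtrLast32 <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff));
}
#endif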
8226
8227
8228/**
8229 * Translates a virtual address to a physical address and checks if we
8230 * can access the page as specified.
8231 *
8232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8233 * @param GCPtrMem The virtual address.
8234 * @param fAccess The intended access.
8235 * @param pGCPhysMem Where to return the physical address.
8236 */
8237IEM_STATIC VBOXSTRICTRC
8238iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8239{
8240 /** @todo Need a different PGM interface here. We're currently using
8241 * generic / REM interfaces. This won't cut it for R0 & RC. */
8242 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8243 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8244 RTGCPHYS GCPhys;
8245 uint64_t fFlags;
8246 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8247 if (RT_FAILURE(rc))
8248 {
8249 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8250 /** @todo Check unassigned memory in unpaged mode. */
8251 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8252 *pGCPhysMem = NIL_RTGCPHYS;
8253 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8254 }
8255
8256 /* If the page is writable and does not have the no-exec bit set, all
8257 access is allowed. Otherwise we'll have to check more carefully... */
8258 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8259 {
8260 /* Write to read only memory? */
8261 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8262 && !(fFlags & X86_PTE_RW)
8263 && ( (pVCpu->iem.s.uCpl == 3
8264 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8265 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8266 {
8267 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8268 *pGCPhysMem = NIL_RTGCPHYS;
8269 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8270 }
8271
8272 /* Kernel memory accessed by userland? */
8273 if ( !(fFlags & X86_PTE_US)
8274 && pVCpu->iem.s.uCpl == 3
8275 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8276 {
8277 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8278 *pGCPhysMem = NIL_RTGCPHYS;
8279 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8280 }
8281
8282 /* Executing non-executable memory? */
8283 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8284 && (fFlags & X86_PTE_PAE_NX)
8285 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8286 {
8287 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8288 *pGCPhysMem = NIL_RTGCPHYS;
8289 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8290 VERR_ACCESS_DENIED);
8291 }
8292 }
8293
8294 /*
8295 * Set the dirty / access flags.
8296 * ASSUMES this is set when the address is translated rather than on commit...
8297 */
8298 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8299 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8300 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8301 {
8302 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8303 AssertRC(rc2);
8304 }
8305
8306 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8307 *pGCPhysMem = GCPhys;
8308 return VINF_SUCCESS;
8309}
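
/*
 * Illustrative sketch (assumption: standalone helper, not part of the build; the
 * IEM_ACCESS_WHAT_SYS special case is omitted): the paging permission checks above
 * expressed with plain booleans.
 */
#if 0
static bool iemSketchIsPageAccessAllowed(bool fPteWritable, bool fPteUser, bool fPteNoExec,
                                         bool fWrite, bool fExec, unsigned uCpl,
                                         bool fCr0Wp, bool fEferNxe)
{
    /* Writes to a read-only page fault for CPL 3, and for CPL 0-2 when CR0.WP is set. */
    if (fWrite && !fPteWritable && (uCpl == 3 || fCr0Wp))
        return false;
    /* User-mode access to a supervisor page always faults. */
    if (uCpl == 3 && !fPteUser)
        return false;
    /* Instruction fetches from a no-execute page fault when EFER.NXE is enabled. */
    if (fExec && fPteNoExec && fEferNxe)
        return false;
    return true;
}
#endif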
8310
8311
8312
8313/**
8314 * Maps a physical page.
8315 *
8316 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param GCPhysMem The physical address.
8319 * @param fAccess The intended access.
8320 * @param ppvMem Where to return the mapping address.
8321 * @param pLock The PGM lock.
8322 */
8323IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8324{
8325#ifdef IEM_LOG_MEMORY_WRITES
8326 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8327 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8328#endif
8329
8330 /** @todo This API may require some improvement later. A private deal with PGM
8331 * regarding locking and unlocking needs to be struck. A couple of TLBs
8332 * living in PGM, but with publicly accessible inlined access methods
8333 * could perhaps be an even better solution. */
8334 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8335 GCPhysMem,
8336 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8337 pVCpu->iem.s.fBypassHandlers,
8338 ppvMem,
8339 pLock);
8340 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8341 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8342
8343 return rc;
8344}
8345
8346
8347/**
8348 * Unmap a page previously mapped by iemMemPageMap.
8349 *
8350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8351 * @param GCPhysMem The physical address.
8352 * @param fAccess The intended access.
8353 * @param pvMem What iemMemPageMap returned.
8354 * @param pLock The PGM lock.
8355 */
8356DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8357{
8358 NOREF(pVCpu);
8359 NOREF(GCPhysMem);
8360 NOREF(fAccess);
8361 NOREF(pvMem);
8362 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8363}
8364
8365
8366/**
8367 * Looks up a memory mapping entry.
8368 *
8369 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8371 * @param pvMem The memory address.
8372 * @param fAccess The kind of access the mapping was made with.
8373 */
8374DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8375{
8376 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8377 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8378 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8379 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8380 return 0;
8381 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8382 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8383 return 1;
8384 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8385 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8386 return 2;
8387 return VERR_NOT_FOUND;
8388}
8389
8390
8391/**
8392 * Finds a free memmap entry when using iNextMapping doesn't work.
8393 *
8394 * @returns Memory mapping index, 1024 on failure.
8395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8396 */
8397IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8398{
8399 /*
8400 * The easy case.
8401 */
8402 if (pVCpu->iem.s.cActiveMappings == 0)
8403 {
8404 pVCpu->iem.s.iNextMapping = 1;
8405 return 0;
8406 }
8407
8408 /* There should be enough mappings for all instructions. */
8409 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8410
8411 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8412 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8413 return i;
8414
8415 AssertFailedReturn(1024);
8416}
8417
8418
8419/**
8420 * Commits a bounce buffer that needs writing back and unmaps it.
8421 *
8422 * @returns Strict VBox status code.
8423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8424 * @param iMemMap The index of the buffer to commit.
8425 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8426 * Always false in ring-3, obviously.
8427 */
8428IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8429{
8430 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8431 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8432#ifdef IN_RING3
8433 Assert(!fPostponeFail);
8434 RT_NOREF_PV(fPostponeFail);
8435#endif
8436
8437 /*
8438 * Do the writing.
8439 */
8440 PVM pVM = pVCpu->CTX_SUFF(pVM);
8441 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8442 {
8443 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8444 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8445 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8446 if (!pVCpu->iem.s.fBypassHandlers)
8447 {
8448 /*
8449 * Carefully and efficiently dealing with access handler return
8450 * codes makes this a little bloated.
8451 */
8452 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8454 pbBuf,
8455 cbFirst,
8456 PGMACCESSORIGIN_IEM);
8457 if (rcStrict == VINF_SUCCESS)
8458 {
8459 if (cbSecond)
8460 {
8461 rcStrict = PGMPhysWrite(pVM,
8462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8463 pbBuf + cbFirst,
8464 cbSecond,
8465 PGMACCESSORIGIN_IEM);
8466 if (rcStrict == VINF_SUCCESS)
8467 { /* nothing */ }
8468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8469 {
8470 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8473 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8474 }
8475#ifndef IN_RING3
8476 else if (fPostponeFail)
8477 {
8478 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8481 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8482 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8483 return iemSetPassUpStatus(pVCpu, rcStrict);
8484 }
8485#endif
8486 else
8487 {
8488 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8491 return rcStrict;
8492 }
8493 }
8494 }
8495 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8496 {
8497 if (!cbSecond)
8498 {
8499 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8500 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8501 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8502 }
8503 else
8504 {
8505 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8506 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8507 pbBuf + cbFirst,
8508 cbSecond,
8509 PGMACCESSORIGIN_IEM);
8510 if (rcStrict2 == VINF_SUCCESS)
8511 {
8512 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8515 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8516 }
8517 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8518 {
8519 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8520 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8521 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8522 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8524 }
8525#ifndef IN_RING3
8526 else if (fPostponeFail)
8527 {
8528 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8529 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8530 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8531 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8532 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8533 return iemSetPassUpStatus(pVCpu, rcStrict);
8534 }
8535#endif
8536 else
8537 {
8538 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8540 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8541 return rcStrict2;
8542 }
8543 }
8544 }
8545#ifndef IN_RING3
8546 else if (fPostponeFail)
8547 {
8548 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8549 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8550 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8551 if (!cbSecond)
8552 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8553 else
8554 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8555 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8556 return iemSetPassUpStatus(pVCpu, rcStrict);
8557 }
8558#endif
8559 else
8560 {
8561 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8563 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8564 return rcStrict;
8565 }
8566 }
8567 else
8568 {
8569 /*
8570 * No access handlers, much simpler.
8571 */
8572 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8573 if (RT_SUCCESS(rc))
8574 {
8575 if (cbSecond)
8576 {
8577 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8578 if (RT_SUCCESS(rc))
8579 { /* likely */ }
8580 else
8581 {
8582 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8584 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8585 return rc;
8586 }
8587 }
8588 }
8589 else
8590 {
8591 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8592 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8593 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8594 return rc;
8595 }
8596 }
8597 }
8598
8599#if defined(IEM_LOG_MEMORY_WRITES)
8600 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8601 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8602 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8603 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8604 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8605 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8606
8607 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8608 g_cbIemWrote = cbWrote;
8609 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8610#endif
8611
8612 /*
8613 * Free the mapping entry.
8614 */
8615 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8616 Assert(pVCpu->iem.s.cActiveMappings != 0);
8617 pVCpu->iem.s.cActiveMappings--;
8618 return VINF_SUCCESS;
8619}
8620
8621
8622/**
8623 * iemMemMap worker that deals with a request crossing pages.
8624 */
8625IEM_STATIC VBOXSTRICTRC
8626iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8627{
8628 /*
8629 * Do the address translations.
8630 */
8631 RTGCPHYS GCPhysFirst;
8632 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8633 if (rcStrict != VINF_SUCCESS)
8634 return rcStrict;
8635
8636 RTGCPHYS GCPhysSecond;
8637 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8638 fAccess, &GCPhysSecond);
8639 if (rcStrict != VINF_SUCCESS)
8640 return rcStrict;
8641 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8642
8643 PVM pVM = pVCpu->CTX_SUFF(pVM);
8644
8645 /*
8646 * Read in the current memory content if it's a read, execute or partial
8647 * write access.
8648 */
8649 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8650 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8651 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
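    /* Example (illustrative): a 4 byte access starting at page offset 0xffe splits
       into cbFirstPage=2 and cbSecondPage=2. */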
8652
8653 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8654 {
8655 if (!pVCpu->iem.s.fBypassHandlers)
8656 {
8657 /*
8658 * We must carefully deal with access handler status codes here,
8659 * which makes the code a bit bloated.
8660 */
8661 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8662 if (rcStrict == VINF_SUCCESS)
8663 {
8664 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8665 if (rcStrict == VINF_SUCCESS)
8666 { /*likely */ }
8667 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8668 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8669 else
8670 {
8671 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8672 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8673 return rcStrict;
8674 }
8675 }
8676 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8677 {
8678 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8679 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8680 {
8681 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8682 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8683 }
8684 else
8685 {
8686 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8687 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8688 return rcStrict2;
8689 }
8690 }
8691 else
8692 {
8693 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8694 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8695 return rcStrict;
8696 }
8697 }
8698 else
8699 {
8700 /*
8701 * No informational status codes here, much more straightforward.
8702 */
8703 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8704 if (RT_SUCCESS(rc))
8705 {
8706 Assert(rc == VINF_SUCCESS);
8707 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8708 if (RT_SUCCESS(rc))
8709 Assert(rc == VINF_SUCCESS);
8710 else
8711 {
8712 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8713 return rc;
8714 }
8715 }
8716 else
8717 {
8718 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8719 return rc;
8720 }
8721 }
8722 }
8723#ifdef VBOX_STRICT
8724 else
8725 memset(pbBuf, 0xcc, cbMem);
8726 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8727 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8728#endif
8729
8730 /*
8731 * Commit the bounce buffer entry.
8732 */
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8734 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8737 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8738 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8739 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8740 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8741 pVCpu->iem.s.cActiveMappings++;
8742
8743 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8744 *ppvMem = pbBuf;
8745 return VINF_SUCCESS;
8746}
8747
8748
8749/**
8750 * iemMemMap worker that deals with iemMemPageMap failures.
8751 */
8752IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8753 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8754{
8755 /*
8756 * Filter out conditions we can handle and the ones which shouldn't happen.
8757 */
8758 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8759 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8760 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8761 {
8762 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8763 return rcMap;
8764 }
8765 pVCpu->iem.s.cPotentialExits++;
8766
8767 /*
8768 * Read in the current memory content if it's a read, execute or partial
8769 * write access.
8770 */
8771 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8772 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8773 {
8774 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8775 memset(pbBuf, 0xff, cbMem);
8776 else
8777 {
8778 int rc;
8779 if (!pVCpu->iem.s.fBypassHandlers)
8780 {
8781 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8782 if (rcStrict == VINF_SUCCESS)
8783 { /* nothing */ }
8784 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8785 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8786 else
8787 {
8788 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8789 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8790 return rcStrict;
8791 }
8792 }
8793 else
8794 {
8795 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8796 if (RT_SUCCESS(rc))
8797 { /* likely */ }
8798 else
8799 {
8800 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8801 GCPhysFirst, rc));
8802 return rc;
8803 }
8804 }
8805 }
8806 }
8807#ifdef VBOX_STRICT
8808 else
8809 memset(pbBuf, 0xcc, cbMem);
8810#endif
8811#ifdef VBOX_STRICT
8812 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8813 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8814#endif
8815
8816 /*
8817 * Commit the bounce buffer entry.
8818 */
8819 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8820 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8821 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8822 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8823 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8824 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8825 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8826 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8827 pVCpu->iem.s.cActiveMappings++;
8828
8829 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8830 *ppvMem = pbBuf;
8831 return VINF_SUCCESS;
8832}
8833
8834
8835
8836/**
8837 * Maps the specified guest memory for the given kind of access.
8838 *
8839 * This may be using bounce buffering of the memory if it's crossing a page
8840 * boundary or if there is an access handler installed for any of it. Because
8841 * of lock prefix guarantees, we're in for some extra clutter when this
8842 * happens.
8843 *
8844 * This may raise a \#GP, \#SS, \#PF or \#AC.
8845 *
8846 * @returns VBox strict status code.
8847 *
8848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8849 * @param ppvMem Where to return the pointer to the mapped
8850 * memory.
8851 * @param cbMem The number of bytes to map. This is usually 1,
8852 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8853 * string operations it can be up to a page.
8854 * @param iSegReg The index of the segment register to use for
8855 * this access. The base and limits are checked.
8856 * Use UINT8_MAX to indicate that no segmentation
8857 * is required (for IDT, GDT and LDT accesses).
8858 * @param GCPtrMem The address of the guest memory.
8859 * @param fAccess How the memory is being accessed. The
8860 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8861 * how to map the memory, while the
8862 * IEM_ACCESS_WHAT_XXX bit is used when raising
8863 * exceptions.
8864 */
8865IEM_STATIC VBOXSTRICTRC
8866iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8867{
8868 /*
8869 * Check the input and figure out which mapping entry to use.
8870 */
8871 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8872 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8873 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8874
8875 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8876 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8877 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8878 {
8879 iMemMap = iemMemMapFindFree(pVCpu);
8880 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8881 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8882 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8883 pVCpu->iem.s.aMemMappings[2].fAccess),
8884 VERR_IEM_IPE_9);
8885 }
8886
8887 /*
8888 * Map the memory, checking that we can actually access it. If something
8889 * slightly complicated happens, fall back on bounce buffering.
8890 */
8891 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8892 if (rcStrict != VINF_SUCCESS)
8893 return rcStrict;
8894
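    /* Note (illustrative): cbMem is at most 512 bytes here (see the assertion above),
       so at most one page boundary can be crossed and the bounce buffer worker only
       ever has to deal with two pages. */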
8895 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8896 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8897
8898 RTGCPHYS GCPhysFirst;
8899 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8900 if (rcStrict != VINF_SUCCESS)
8901 return rcStrict;
8902
8903 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8904 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8905 if (fAccess & IEM_ACCESS_TYPE_READ)
8906 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8907
8908 void *pvMem;
8909 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8910 if (rcStrict != VINF_SUCCESS)
8911 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8912
8913 /*
8914 * Fill in the mapping table entry.
8915 */
8916 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8917 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8918 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8919 pVCpu->iem.s.cActiveMappings++;
8920
8921 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8922 *ppvMem = pvMem;
8923
8924 return VINF_SUCCESS;
8925}
8926
8927
8928/**
8929 * Commits the guest memory if bounce buffered and unmaps it.
8930 *
8931 * @returns Strict VBox status code.
8932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8933 * @param pvMem The mapping.
8934 * @param fAccess The kind of access.
8935 */
8936IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8937{
8938 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8939 AssertReturn(iMemMap >= 0, iMemMap);
8940
8941 /* If it's bounce buffered, we may need to write back the buffer. */
8942 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8943 {
8944 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8945 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8946 }
8947 /* Otherwise unlock it. */
8948 else
8949 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8950
8951 /* Free the entry. */
8952 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8953 Assert(pVCpu->iem.s.cActiveMappings != 0);
8954 pVCpu->iem.s.cActiveMappings--;
8955 return VINF_SUCCESS;
8956}
8957
8958#ifdef IEM_WITH_SETJMP
8959
8960/**
8961 * Maps the specified guest memory for the given kind of access, longjmp on
8962 * error.
8963 *
8964 * This may be using bounce buffering of the memory if it's crossing a page
8965 * boundary or if there is an access handler installed for any of it. Because
8966 * of lock prefix guarantees, we're in for some extra clutter when this
8967 * happens.
8968 *
8969 * This may raise a \#GP, \#SS, \#PF or \#AC.
8970 *
8971 * @returns Pointer to the mapped memory.
8972 *
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param cbMem The number of bytes to map. This is usually 1,
8975 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8976 * string operations it can be up to a page.
8977 * @param iSegReg The index of the segment register to use for
8978 * this access. The base and limits are checked.
8979 * Use UINT8_MAX to indicate that no segmentation
8980 * is required (for IDT, GDT and LDT accesses).
8981 * @param GCPtrMem The address of the guest memory.
8982 * @param fAccess How the memory is being accessed. The
8983 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8984 * how to map the memory, while the
8985 * IEM_ACCESS_WHAT_XXX bit is used when raising
8986 * exceptions.
8987 */
8988IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8989{
8990 /*
8991 * Check the input and figure out which mapping entry to use.
8992 */
8993 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8994 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8995 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8996
8997 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8998 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8999 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9000 {
9001 iMemMap = iemMemMapFindFree(pVCpu);
9002 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9003 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9004 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9005 pVCpu->iem.s.aMemMappings[2].fAccess),
9006 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9007 }
9008
9009 /*
9010 * Map the memory, checking that we can actually access it. If something
9011 * slightly complicated happens, fall back on bounce buffering.
9012 */
9013 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9014 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9015 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016
9017 /* Crossing a page boundary? */
9018 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9019 { /* No (likely). */ }
9020 else
9021 {
9022 void *pvMem;
9023 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9024 if (rcStrict == VINF_SUCCESS)
9025 return pvMem;
9026 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9027 }
9028
9029 RTGCPHYS GCPhysFirst;
9030 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9031 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9032 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9033
9034 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9035 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9036 if (fAccess & IEM_ACCESS_TYPE_READ)
9037 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9038
9039 void *pvMem;
9040 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9041 if (rcStrict == VINF_SUCCESS)
9042 { /* likely */ }
9043 else
9044 {
9045 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9046 if (rcStrict == VINF_SUCCESS)
9047 return pvMem;
9048 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9049 }
9050
9051 /*
9052 * Fill in the mapping table entry.
9053 */
9054 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9055 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9056 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9057 pVCpu->iem.s.cActiveMappings++;
9058
9059 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9060 return pvMem;
9061}
9062
9063
9064/**
9065 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9066 *
9067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9068 * @param pvMem The mapping.
9069 * @param fAccess The kind of access.
9070 */
9071IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9072{
9073 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9074 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9075
9076 /* If it's bounce buffered, we may need to write back the buffer. */
9077 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9078 {
9079 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9080 {
9081 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9082 if (rcStrict == VINF_SUCCESS)
9083 return;
9084 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9085 }
9086 }
9087 /* Otherwise unlock it. */
9088 else
9089 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9090
9091 /* Free the entry. */
9092 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9093 Assert(pVCpu->iem.s.cActiveMappings != 0);
9094 pVCpu->iem.s.cActiveMappings--;
9095}
9096
9097#endif /* IEM_WITH_SETJMP */
9098
9099#ifndef IN_RING3
9100/**
9101 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9102 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9103 *
9104 * Allows the instruction to be completed and retired, while the IEM user will
9105 * return to ring-3 immediately afterwards and do the postponed writes there.
9106 *
9107 * @returns VBox status code (no strict statuses). Caller must check
9108 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9110 * @param pvMem The mapping.
9111 * @param fAccess The kind of access.
9112 */
9113IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9114{
9115 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9116 AssertReturn(iMemMap >= 0, iMemMap);
9117
9118 /* If it's bounce buffered, we may need to write back the buffer. */
9119 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9120 {
9121 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9122 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9123 }
9124 /* Otherwise unlock it. */
9125 else
9126 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9127
9128 /* Free the entry. */
9129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9130 Assert(pVCpu->iem.s.cActiveMappings != 0);
9131 pVCpu->iem.s.cActiveMappings--;
9132 return VINF_SUCCESS;
9133}
9134#endif
9135
9136
9137/**
9138 * Rolls back mappings, releasing page locks and such.
9139 *
9140 * The caller shall only call this after checking cActiveMappings.
9141 *
9143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9144 */
9145IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9146{
9147 Assert(pVCpu->iem.s.cActiveMappings > 0);
9148
9149 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9150 while (iMemMap-- > 0)
9151 {
9152 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9153 if (fAccess != IEM_ACCESS_INVALID)
9154 {
9155 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9156 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9157 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9158 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9159 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9160 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9161 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9162 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9163 pVCpu->iem.s.cActiveMappings--;
9164 }
9165 }
9166}
9167
9168
9169/**
9170 * Fetches a data byte.
9171 *
9172 * @returns Strict VBox status code.
9173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9174 * @param pu8Dst Where to return the byte.
9175 * @param iSegReg The index of the segment register to use for
9176 * this access. The base and limits are checked.
9177 * @param GCPtrMem The address of the guest memory.
9178 */
9179IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9180{
9181 /* The lazy approach for now... */
9182 uint8_t const *pu8Src;
9183 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9184 if (rc == VINF_SUCCESS)
9185 {
9186 *pu8Dst = *pu8Src;
9187 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9188 }
9189 return rc;
9190}
9191
9192
9193#ifdef IEM_WITH_SETJMP
9194/**
9195 * Fetches a data byte, longjmp on error.
9196 *
9197 * @returns The byte.
9198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9199 * @param iSegReg The index of the segment register to use for
9200 * this access. The base and limits are checked.
9201 * @param GCPtrMem The address of the guest memory.
9202 */
9203DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9204{
9205 /* The lazy approach for now... */
9206 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9207 uint8_t const bRet = *pu8Src;
9208 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9209 return bRet;
9210}
9211#endif /* IEM_WITH_SETJMP */
9212
9213
9214/**
9215 * Fetches a data word.
9216 *
9217 * @returns Strict VBox status code.
9218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9219 * @param pu16Dst Where to return the word.
9220 * @param iSegReg The index of the segment register to use for
9221 * this access. The base and limits are checked.
9222 * @param GCPtrMem The address of the guest memory.
9223 */
9224IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9225{
9226 /* The lazy approach for now... */
9227 uint16_t const *pu16Src;
9228 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9229 if (rc == VINF_SUCCESS)
9230 {
9231 *pu16Dst = *pu16Src;
9232 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9233 }
9234 return rc;
9235}
9236
9237
9238#ifdef IEM_WITH_SETJMP
9239/**
9240 * Fetches a data word, longjmp on error.
9241 *
9242 * @returns The word.
9243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9244 * @param iSegReg The index of the segment register to use for
9245 * this access. The base and limits are checked.
9246 * @param GCPtrMem The address of the guest memory.
9247 */
9248DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9249{
9250 /* The lazy approach for now... */
9251 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9252 uint16_t const u16Ret = *pu16Src;
9253 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9254 return u16Ret;
9255}
9256#endif
9257
9258
9259/**
9260 * Fetches a data dword.
9261 *
9262 * @returns Strict VBox status code.
9263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9264 * @param pu32Dst Where to return the dword.
9265 * @param iSegReg The index of the segment register to use for
9266 * this access. The base and limits are checked.
9267 * @param GCPtrMem The address of the guest memory.
9268 */
9269IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9270{
9271 /* The lazy approach for now... */
9272 uint32_t const *pu32Src;
9273 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9274 if (rc == VINF_SUCCESS)
9275 {
9276 *pu32Dst = *pu32Src;
9277 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9278 }
9279 return rc;
9280}
9281
9282
9283#ifdef IEM_WITH_SETJMP
9284
9285IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9286{
9287 Assert(cbMem >= 1);
9288 Assert(iSegReg < X86_SREG_COUNT);
9289
9290 /*
9291 * 64-bit mode is simpler.
9292 */
9293 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9294 {
9295 if (iSegReg >= X86_SREG_FS)
9296 {
9297 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9298 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9299 GCPtrMem += pSel->u64Base;
9300 }
9301
9302 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9303 return GCPtrMem;
9304 }
9305 /*
9306 * 16-bit and 32-bit segmentation.
9307 */
9308 else
9309 {
9310 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9311 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9312 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9313 == X86DESCATTR_P /* data, expand up */
9314 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9315 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9316 {
9317 /* expand up */
9318 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9319 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9320 && GCPtrLast32 > (uint32_t)GCPtrMem))
9321 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9322 }
9323 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9324 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9325 {
9326 /* expand down */
9327 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9328 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9329 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9330 && GCPtrLast32 > (uint32_t)GCPtrMem))
9331 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9332 }
9333 else
9334 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9335 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9336 }
9337 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9338}
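

/*
 * Illustrative sketch: the expand-down limit check above in isolation.  An
 * access is valid when the offset lies strictly above the limit and the end of
 * the access still fits below the 64 KiB (or 4 GiB when D/B=1) upper bound
 * without wrapping.  The iemExample* name and parameters are hypothetical and
 * the block is never compiled.
 */
#if 0 /* example only */
static bool iemExampleExpandDownOk(uint32_t off, uint32_t cb, uint32_t uLimit, bool fDefBig)
{
    uint32_t const offPastLast = off + cb; /* first byte past the access */
    return off > uLimit
        && offPastLast <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff))
        && offPastLast > off; /* rejects 32-bit wrap-around */
}
#endif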
9339
9340
9341IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9342{
9343 Assert(cbMem >= 1);
9344 Assert(iSegReg < X86_SREG_COUNT);
9345
9346 /*
9347 * 64-bit mode is simpler.
9348 */
9349 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9350 {
9351 if (iSegReg >= X86_SREG_FS)
9352 {
9353 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9354 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9355 GCPtrMem += pSel->u64Base;
9356 }
9357
9358 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9359 return GCPtrMem;
9360 }
9361 /*
9362 * 16-bit and 32-bit segmentation.
9363 */
9364 else
9365 {
9366 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9367 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9368 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9369 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9370 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9371 {
9372 /* expand up */
9373 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9374 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9375 && GCPtrLast32 > (uint32_t)GCPtrMem))
9376 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9377 }
 9378    else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9379 {
9380 /* expand down */
9381 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9382 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9383 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9384 && GCPtrLast32 > (uint32_t)GCPtrMem))
9385 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9386 }
9387 else
9388 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9389 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9390 }
9391 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9392}
9393
9394
9395/**
9396 * Fetches a data dword, longjmp on error, fallback/safe version.
9397 *
9398 * @returns The dword
9399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9400 * @param iSegReg The index of the segment register to use for
9401 * this access. The base and limits are checked.
9402 * @param GCPtrMem The address of the guest memory.
9403 */
9404IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9405{
9406 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9407 uint32_t const u32Ret = *pu32Src;
9408 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9409 return u32Ret;
9410}
9411
9412
9413/**
9414 * Fetches a data dword, longjmp on error.
9415 *
9416 * @returns The dword
9417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9418 * @param iSegReg The index of the segment register to use for
9419 * this access. The base and limits are checked.
9420 * @param GCPtrMem The address of the guest memory.
9421 */
9422DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9423{
9424# ifdef IEM_WITH_DATA_TLB
9425 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9426 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9427 {
9428 /// @todo more later.
9429 }
9430
9431 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9432# else
9433 /* The lazy approach. */
9434 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9435 uint32_t const u32Ret = *pu32Src;
9436 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9437 return u32Ret;
9438# endif
9439}
9440#endif
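

/*
 * Illustrative sketch: the data TLB fast path above can only be taken when the
 * access does not cross a page boundary.  Written as a hypothetical stand-alone
 * predicate (iemExample* is not a real IEM helper); the block is never compiled.
 */
#if 0 /* example only */
static bool iemExampleFitsWithinPage(RTGCPTR GCPtrEff, size_t cbAccess)
{
    return (GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - cbAccess;
}
#endif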
9441
9442
9443#ifdef SOME_UNUSED_FUNCTION
9444/**
9445 * Fetches a data dword and sign extends it to a qword.
9446 *
9447 * @returns Strict VBox status code.
9448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9449 * @param pu64Dst Where to return the sign extended value.
9450 * @param iSegReg The index of the segment register to use for
9451 * this access. The base and limits are checked.
9452 * @param GCPtrMem The address of the guest memory.
9453 */
9454IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9455{
9456 /* The lazy approach for now... */
9457 int32_t const *pi32Src;
9458 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9459 if (rc == VINF_SUCCESS)
9460 {
9461 *pu64Dst = *pi32Src;
9462 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9463 }
9464#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9465 else
9466 *pu64Dst = 0;
9467#endif
9468 return rc;
9469}
9470#endif
9471
9472
9473/**
9474 * Fetches a data qword.
9475 *
9476 * @returns Strict VBox status code.
9477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9478 * @param pu64Dst Where to return the qword.
9479 * @param iSegReg The index of the segment register to use for
9480 * this access. The base and limits are checked.
9481 * @param GCPtrMem The address of the guest memory.
9482 */
9483IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9484{
9485 /* The lazy approach for now... */
9486 uint64_t const *pu64Src;
9487 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9488 if (rc == VINF_SUCCESS)
9489 {
9490 *pu64Dst = *pu64Src;
9491 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9492 }
9493 return rc;
9494}
9495
9496
9497#ifdef IEM_WITH_SETJMP
9498/**
9499 * Fetches a data qword, longjmp on error.
9500 *
9501 * @returns The qword.
9502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9503 * @param iSegReg The index of the segment register to use for
9504 * this access. The base and limits are checked.
9505 * @param GCPtrMem The address of the guest memory.
9506 */
9507DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9508{
9509 /* The lazy approach for now... */
9510 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9511 uint64_t const u64Ret = *pu64Src;
9512 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9513 return u64Ret;
9514}
9515#endif
9516
9517
9518/**
 9519 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9520 *
9521 * @returns Strict VBox status code.
9522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9523 * @param pu64Dst Where to return the qword.
9524 * @param iSegReg The index of the segment register to use for
9525 * this access. The base and limits are checked.
9526 * @param GCPtrMem The address of the guest memory.
9527 */
9528IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9529{
9530 /* The lazy approach for now... */
9531 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9532 if (RT_UNLIKELY(GCPtrMem & 15))
9533 return iemRaiseGeneralProtectionFault0(pVCpu);
9534
9535 uint64_t const *pu64Src;
9536 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9537 if (rc == VINF_SUCCESS)
9538 {
9539 *pu64Dst = *pu64Src;
9540 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9541 }
9542 return rc;
9543}
9544
9545
9546#ifdef IEM_WITH_SETJMP
9547/**
 9548 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9549 *
9550 * @returns The qword.
9551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9552 * @param iSegReg The index of the segment register to use for
9553 * this access. The base and limits are checked.
9554 * @param GCPtrMem The address of the guest memory.
9555 */
9556DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9557{
9558 /* The lazy approach for now... */
9559 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9560 if (RT_LIKELY(!(GCPtrMem & 15)))
9561 {
9562 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9563 uint64_t const u64Ret = *pu64Src;
9564 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9565 return u64Ret;
9566 }
9567
9568 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9569 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9570}
9571#endif
9572
9573
9574/**
9575 * Fetches a data tword.
9576 *
9577 * @returns Strict VBox status code.
9578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9579 * @param pr80Dst Where to return the tword.
9580 * @param iSegReg The index of the segment register to use for
9581 * this access. The base and limits are checked.
9582 * @param GCPtrMem The address of the guest memory.
9583 */
9584IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9585{
9586 /* The lazy approach for now... */
9587 PCRTFLOAT80U pr80Src;
9588 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9589 if (rc == VINF_SUCCESS)
9590 {
9591 *pr80Dst = *pr80Src;
9592 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9593 }
9594 return rc;
9595}
9596
9597
9598#ifdef IEM_WITH_SETJMP
9599/**
9600 * Fetches a data tword, longjmp on error.
9601 *
9602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9603 * @param pr80Dst Where to return the tword.
9604 * @param iSegReg The index of the segment register to use for
9605 * this access. The base and limits are checked.
9606 * @param GCPtrMem The address of the guest memory.
9607 */
9608DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9609{
9610 /* The lazy approach for now... */
9611 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9612 *pr80Dst = *pr80Src;
9613 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9614}
9615#endif
9616
9617
9618/**
9619 * Fetches a data dqword (double qword), generally SSE related.
9620 *
9621 * @returns Strict VBox status code.
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9623 * @param pu128Dst Where to return the dqword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 PCRTUINT128U pu128Src;
9632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9633 if (rc == VINF_SUCCESS)
9634 {
9635 pu128Dst->au64[0] = pu128Src->au64[0];
9636 pu128Dst->au64[1] = pu128Src->au64[1];
9637 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9638 }
9639 return rc;
9640}
9641
9642
9643#ifdef IEM_WITH_SETJMP
9644/**
9645 * Fetches a data dqword (double qword), generally SSE related.
9646 *
9647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9648 * @param pu128Dst Where to return the dqword.
9649 * @param iSegReg The index of the segment register to use for
9650 * this access. The base and limits are checked.
9651 * @param GCPtrMem The address of the guest memory.
9652 */
9653IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9654{
9655 /* The lazy approach for now... */
9656 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9657 pu128Dst->au64[0] = pu128Src->au64[0];
9658 pu128Dst->au64[1] = pu128Src->au64[1];
9659 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9660}
9661#endif
9662
9663
9664/**
9665 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9666 * related.
9667 *
9668 * Raises \#GP(0) if not aligned.
9669 *
9670 * @returns Strict VBox status code.
9671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9672 * @param pu128Dst Where to return the dqword.
9673 * @param iSegReg The index of the segment register to use for
9674 * this access. The base and limits are checked.
9675 * @param GCPtrMem The address of the guest memory.
9676 */
9677IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9678{
9679 /* The lazy approach for now... */
9680 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9681 if ( (GCPtrMem & 15)
9682 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9683 return iemRaiseGeneralProtectionFault0(pVCpu);
9684
9685 PCRTUINT128U pu128Src;
9686 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9687 if (rc == VINF_SUCCESS)
9688 {
9689 pu128Dst->au64[0] = pu128Src->au64[0];
9690 pu128Dst->au64[1] = pu128Src->au64[1];
9691 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9692 }
9693 return rc;
9694}
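

/*
 * Illustrative sketch: the #GP(0) alignment rule applied above, in isolation.
 * A misaligned 16 byte SSE data access faults unless MXCSR.MM (AMD's misaligned
 * exception mask) is set.  The iemExample* name is hypothetical and the block
 * is never compiled.
 */
#if 0 /* example only */
static bool iemExampleSseAccessFaultsOnAlignment(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15) != 0
        && !(fMxCsr & X86_MXCSR_MM);
}
#endif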
9695
9696
9697#ifdef IEM_WITH_SETJMP
9698/**
9699 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9700 * related, longjmp on error.
9701 *
9702 * Raises \#GP(0) if not aligned.
9703 *
9704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9705 * @param pu128Dst Where to return the dqword.
9706 * @param iSegReg The index of the segment register to use for
9707 * this access. The base and limits are checked.
9708 * @param GCPtrMem The address of the guest memory.
9709 */
9710DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9711{
9712 /* The lazy approach for now... */
9713 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9714 if ( (GCPtrMem & 15) == 0
9715 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9716 {
9717 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9718 pu128Dst->au64[0] = pu128Src->au64[0];
9719 pu128Dst->au64[1] = pu128Src->au64[1];
9720 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9721 return;
9722 }
9723
9724 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9725 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9726}
9727#endif
9728
9729
9730/**
9731 * Fetches a data oword (octo word), generally AVX related.
9732 *
9733 * @returns Strict VBox status code.
9734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9735 * @param pu256Dst Where to return the oword.
9736 * @param iSegReg The index of the segment register to use for
9737 * this access. The base and limits are checked.
9738 * @param GCPtrMem The address of the guest memory.
9739 */
9740IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9741{
9742 /* The lazy approach for now... */
9743 PCRTUINT256U pu256Src;
9744 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9745 if (rc == VINF_SUCCESS)
9746 {
9747 pu256Dst->au64[0] = pu256Src->au64[0];
9748 pu256Dst->au64[1] = pu256Src->au64[1];
9749 pu256Dst->au64[2] = pu256Src->au64[2];
9750 pu256Dst->au64[3] = pu256Src->au64[3];
9751 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9752 }
9753 return rc;
9754}
9755
9756
9757#ifdef IEM_WITH_SETJMP
9758/**
9759 * Fetches a data oword (octo word), generally AVX related.
9760 *
9761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9762 * @param pu256Dst Where to return the oword.
9763 * @param iSegReg The index of the segment register to use for
9764 * this access. The base and limits are checked.
9765 * @param GCPtrMem The address of the guest memory.
9766 */
9767IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9768{
9769 /* The lazy approach for now... */
9770 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9771 pu256Dst->au64[0] = pu256Src->au64[0];
9772 pu256Dst->au64[1] = pu256Src->au64[1];
9773 pu256Dst->au64[2] = pu256Src->au64[2];
9774 pu256Dst->au64[3] = pu256Src->au64[3];
9775 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9776}
9777#endif
9778
9779
9780/**
9781 * Fetches a data oword (octo word) at an aligned address, generally AVX
9782 * related.
9783 *
9784 * Raises \#GP(0) if not aligned.
9785 *
9786 * @returns Strict VBox status code.
9787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9788 * @param pu256Dst Where to return the oword.
9789 * @param iSegReg The index of the segment register to use for
9790 * this access. The base and limits are checked.
9791 * @param GCPtrMem The address of the guest memory.
9792 */
9793IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9794{
9795 /* The lazy approach for now... */
9796 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9797 if (GCPtrMem & 31)
9798 return iemRaiseGeneralProtectionFault0(pVCpu);
9799
9800 PCRTUINT256U pu256Src;
9801 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9802 if (rc == VINF_SUCCESS)
9803 {
9804 pu256Dst->au64[0] = pu256Src->au64[0];
9805 pu256Dst->au64[1] = pu256Src->au64[1];
9806 pu256Dst->au64[2] = pu256Src->au64[2];
9807 pu256Dst->au64[3] = pu256Src->au64[3];
9808 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9809 }
9810 return rc;
9811}
9812
9813
9814#ifdef IEM_WITH_SETJMP
9815/**
9816 * Fetches a data oword (octo word) at an aligned address, generally AVX
9817 * related, longjmp on error.
9818 *
9819 * Raises \#GP(0) if not aligned.
9820 *
9821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 9822 * @param pu256Dst Where to return the oword.
9823 * @param iSegReg The index of the segment register to use for
9824 * this access. The base and limits are checked.
9825 * @param GCPtrMem The address of the guest memory.
9826 */
9827DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9828{
9829 /* The lazy approach for now... */
9830 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9831 if ((GCPtrMem & 31) == 0)
9832 {
9833 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9834 pu256Dst->au64[0] = pu256Src->au64[0];
9835 pu256Dst->au64[1] = pu256Src->au64[1];
9836 pu256Dst->au64[2] = pu256Src->au64[2];
9837 pu256Dst->au64[3] = pu256Src->au64[3];
9838 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9839 return;
9840 }
9841
9842 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9843 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9844}
9845#endif
9846
9847
9848
9849/**
9850 * Fetches a descriptor register (lgdt, lidt).
9851 *
9852 * @returns Strict VBox status code.
9853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9854 * @param pcbLimit Where to return the limit.
9855 * @param pGCPtrBase Where to return the base.
9856 * @param iSegReg The index of the segment register to use for
9857 * this access. The base and limits are checked.
9858 * @param GCPtrMem The address of the guest memory.
9859 * @param enmOpSize The effective operand size.
9860 */
9861IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9862 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9863{
9864 /*
9865 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9866 * little special:
9867 * - The two reads are done separately.
 9868 * - The operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9869 * - We suspect the 386 to actually commit the limit before the base in
9870 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
 9871 * don't try to emulate this eccentric behavior, because it's not well
9872 * enough understood and rather hard to trigger.
9873 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9874 */
9875 VBOXSTRICTRC rcStrict;
9876 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9877 {
9878 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9879 if (rcStrict == VINF_SUCCESS)
9880 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9881 }
9882 else
9883 {
9884 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9885 if (enmOpSize == IEMMODE_32BIT)
9886 {
9887 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9888 {
9889 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9890 if (rcStrict == VINF_SUCCESS)
9891 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9892 }
9893 else
9894 {
9895 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9896 if (rcStrict == VINF_SUCCESS)
9897 {
9898 *pcbLimit = (uint16_t)uTmp;
9899 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9900 }
9901 }
9902 if (rcStrict == VINF_SUCCESS)
9903 *pGCPtrBase = uTmp;
9904 }
9905 else
9906 {
9907 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9908 if (rcStrict == VINF_SUCCESS)
9909 {
9910 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9911 if (rcStrict == VINF_SUCCESS)
9912 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9913 }
9914 }
9915 }
9916 return rcStrict;
9917}
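

/*
 * Illustrative sketch: the memory operand read above is a 16-bit limit followed
 * by the base, fetched as two independent accesses.  A hypothetical packed
 * layout of the 32-bit form makes the offsets explicit (IEMEXAMPLE* is not a
 * real IEM type); the block is never compiled.
 */
#if 0 /* example only */
# pragma pack(1)
typedef struct IEMEXAMPLEXDTR32
{
    uint16_t cbLimit;   /* read first,  at GCPtrMem     */
    uint32_t uBase;     /* read second, at GCPtrMem + 2 */
} IEMEXAMPLEXDTR32;
# pragma pack()
AssertCompileSize(IEMEXAMPLEXDTR32, 6);
#endif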
9918
9919
9920
9921/**
9922 * Stores a data byte.
9923 *
9924 * @returns Strict VBox status code.
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param iSegReg The index of the segment register to use for
9927 * this access. The base and limits are checked.
9928 * @param GCPtrMem The address of the guest memory.
9929 * @param u8Value The value to store.
9930 */
9931IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9932{
9933 /* The lazy approach for now... */
9934 uint8_t *pu8Dst;
9935 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9936 if (rc == VINF_SUCCESS)
9937 {
9938 *pu8Dst = u8Value;
9939 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9940 }
9941 return rc;
9942}
9943
9944
9945#ifdef IEM_WITH_SETJMP
9946/**
9947 * Stores a data byte, longjmp on error.
9948 *
9949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9950 * @param iSegReg The index of the segment register to use for
9951 * this access. The base and limits are checked.
9952 * @param GCPtrMem The address of the guest memory.
9953 * @param u8Value The value to store.
9954 */
9955IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9956{
9957 /* The lazy approach for now... */
9958 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9959 *pu8Dst = u8Value;
9960 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9961}
9962#endif
9963
9964
9965/**
9966 * Stores a data word.
9967 *
9968 * @returns Strict VBox status code.
9969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9970 * @param iSegReg The index of the segment register to use for
9971 * this access. The base and limits are checked.
9972 * @param GCPtrMem The address of the guest memory.
9973 * @param u16Value The value to store.
9974 */
9975IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9976{
9977 /* The lazy approach for now... */
9978 uint16_t *pu16Dst;
9979 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9980 if (rc == VINF_SUCCESS)
9981 {
9982 *pu16Dst = u16Value;
9983 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9984 }
9985 return rc;
9986}
9987
9988
9989#ifdef IEM_WITH_SETJMP
9990/**
9991 * Stores a data word, longjmp on error.
9992 *
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 * @param u16Value The value to store.
9998 */
9999IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10000{
10001 /* The lazy approach for now... */
10002 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10003 *pu16Dst = u16Value;
10004 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10005}
10006#endif
10007
10008
10009/**
10010 * Stores a data dword.
10011 *
10012 * @returns Strict VBox status code.
10013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10014 * @param iSegReg The index of the segment register to use for
10015 * this access. The base and limits are checked.
10016 * @param GCPtrMem The address of the guest memory.
10017 * @param u32Value The value to store.
10018 */
10019IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10020{
10021 /* The lazy approach for now... */
10022 uint32_t *pu32Dst;
10023 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10024 if (rc == VINF_SUCCESS)
10025 {
10026 *pu32Dst = u32Value;
10027 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10028 }
10029 return rc;
10030}
10031
10032
10033#ifdef IEM_WITH_SETJMP
10034/**
 10035 * Stores a data dword, longjmp on error.
 10036 *
10038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10039 * @param iSegReg The index of the segment register to use for
10040 * this access. The base and limits are checked.
10041 * @param GCPtrMem The address of the guest memory.
10042 * @param u32Value The value to store.
10043 */
10044IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10045{
10046 /* The lazy approach for now... */
10047 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10048 *pu32Dst = u32Value;
10049 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10050}
10051#endif
10052
10053
10054/**
10055 * Stores a data qword.
10056 *
10057 * @returns Strict VBox status code.
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param iSegReg The index of the segment register to use for
10060 * this access. The base and limits are checked.
10061 * @param GCPtrMem The address of the guest memory.
10062 * @param u64Value The value to store.
10063 */
10064IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10065{
10066 /* The lazy approach for now... */
10067 uint64_t *pu64Dst;
10068 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10069 if (rc == VINF_SUCCESS)
10070 {
10071 *pu64Dst = u64Value;
10072 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10073 }
10074 return rc;
10075}
10076
10077
10078#ifdef IEM_WITH_SETJMP
10079/**
10080 * Stores a data qword, longjmp on error.
10081 *
10082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10083 * @param iSegReg The index of the segment register to use for
10084 * this access. The base and limits are checked.
10085 * @param GCPtrMem The address of the guest memory.
10086 * @param u64Value The value to store.
10087 */
10088IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10089{
10090 /* The lazy approach for now... */
10091 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10092 *pu64Dst = u64Value;
10093 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10094}
10095#endif
10096
10097
10098/**
10099 * Stores a data dqword.
10100 *
10101 * @returns Strict VBox status code.
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param iSegReg The index of the segment register to use for
10104 * this access. The base and limits are checked.
10105 * @param GCPtrMem The address of the guest memory.
10106 * @param u128Value The value to store.
10107 */
10108IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10109{
10110 /* The lazy approach for now... */
10111 PRTUINT128U pu128Dst;
10112 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10113 if (rc == VINF_SUCCESS)
10114 {
10115 pu128Dst->au64[0] = u128Value.au64[0];
10116 pu128Dst->au64[1] = u128Value.au64[1];
10117 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10118 }
10119 return rc;
10120}
10121
10122
10123#ifdef IEM_WITH_SETJMP
10124/**
10125 * Stores a data dqword, longjmp on error.
10126 *
10127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10128 * @param iSegReg The index of the segment register to use for
10129 * this access. The base and limits are checked.
10130 * @param GCPtrMem The address of the guest memory.
10131 * @param u128Value The value to store.
10132 */
10133IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10134{
10135 /* The lazy approach for now... */
10136 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10137 pu128Dst->au64[0] = u128Value.au64[0];
10138 pu128Dst->au64[1] = u128Value.au64[1];
10139 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10140}
10141#endif
10142
10143
10144/**
10145 * Stores a data dqword, SSE aligned.
10146 *
10147 * @returns Strict VBox status code.
10148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10149 * @param iSegReg The index of the segment register to use for
10150 * this access. The base and limits are checked.
10151 * @param GCPtrMem The address of the guest memory.
10152 * @param u128Value The value to store.
10153 */
10154IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10155{
10156 /* The lazy approach for now... */
10157 if ( (GCPtrMem & 15)
10158 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10159 return iemRaiseGeneralProtectionFault0(pVCpu);
10160
10161 PRTUINT128U pu128Dst;
10162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10163 if (rc == VINF_SUCCESS)
10164 {
10165 pu128Dst->au64[0] = u128Value.au64[0];
10166 pu128Dst->au64[1] = u128Value.au64[1];
10167 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10168 }
10169 return rc;
10170}
10171
10172
10173#ifdef IEM_WITH_SETJMP
10174/**
 10175 * Stores a data dqword, SSE aligned, longjmp on error.
 10176 *
10178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10179 * @param iSegReg The index of the segment register to use for
10180 * this access. The base and limits are checked.
10181 * @param GCPtrMem The address of the guest memory.
10182 * @param u128Value The value to store.
10183 */
10184DECL_NO_INLINE(IEM_STATIC, void)
10185iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10186{
10187 /* The lazy approach for now... */
10188 if ( (GCPtrMem & 15) == 0
10189 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10190 {
10191 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10192 pu128Dst->au64[0] = u128Value.au64[0];
10193 pu128Dst->au64[1] = u128Value.au64[1];
10194 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10195 return;
10196 }
10197
10198 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10199 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10200}
10201#endif
10202
10203
10204/**
 10205 * Stores a data oword (octo word).
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 * @param pu256Value Pointer to the value to store.
10213 */
10214IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10215{
10216 /* The lazy approach for now... */
10217 PRTUINT256U pu256Dst;
10218 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10219 if (rc == VINF_SUCCESS)
10220 {
10221 pu256Dst->au64[0] = pu256Value->au64[0];
10222 pu256Dst->au64[1] = pu256Value->au64[1];
10223 pu256Dst->au64[2] = pu256Value->au64[2];
10224 pu256Dst->au64[3] = pu256Value->au64[3];
10225 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10226 }
10227 return rc;
10228}
10229
10230
10231#ifdef IEM_WITH_SETJMP
10232/**
 10233 * Stores a data oword (octo word), longjmp on error.
10234 *
10235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10236 * @param iSegReg The index of the segment register to use for
10237 * this access. The base and limits are checked.
10238 * @param GCPtrMem The address of the guest memory.
10239 * @param pu256Value Pointer to the value to store.
10240 */
10241IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10242{
10243 /* The lazy approach for now... */
10244 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10245 pu256Dst->au64[0] = pu256Value->au64[0];
10246 pu256Dst->au64[1] = pu256Value->au64[1];
10247 pu256Dst->au64[2] = pu256Value->au64[2];
10248 pu256Dst->au64[3] = pu256Value->au64[3];
10249 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10250}
10251#endif
10252
10253
10254/**
 10255 * Stores a data oword (octo word), AVX aligned.
10256 *
10257 * @returns Strict VBox status code.
10258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10259 * @param iSegReg The index of the segment register to use for
10260 * this access. The base and limits are checked.
10261 * @param GCPtrMem The address of the guest memory.
10262 * @param pu256Value Pointer to the value to store.
10263 */
10264IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10265{
10266 /* The lazy approach for now... */
10267 if (GCPtrMem & 31)
10268 return iemRaiseGeneralProtectionFault0(pVCpu);
10269
10270 PRTUINT256U pu256Dst;
10271 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10272 if (rc == VINF_SUCCESS)
10273 {
10274 pu256Dst->au64[0] = pu256Value->au64[0];
10275 pu256Dst->au64[1] = pu256Value->au64[1];
10276 pu256Dst->au64[2] = pu256Value->au64[2];
10277 pu256Dst->au64[3] = pu256Value->au64[3];
10278 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10279 }
10280 return rc;
10281}
10282
10283
10284#ifdef IEM_WITH_SETJMP
10285/**
 10286 * Stores a data oword (octo word), AVX aligned, longjmp on error.
 10287 *
10289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10290 * @param iSegReg The index of the segment register to use for
10291 * this access. The base and limits are checked.
10292 * @param GCPtrMem The address of the guest memory.
10293 * @param pu256Value Pointer to the value to store.
10294 */
10295DECL_NO_INLINE(IEM_STATIC, void)
10296iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10297{
10298 /* The lazy approach for now... */
10299 if ((GCPtrMem & 31) == 0)
10300 {
10301 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10302 pu256Dst->au64[0] = pu256Value->au64[0];
10303 pu256Dst->au64[1] = pu256Value->au64[1];
10304 pu256Dst->au64[2] = pu256Value->au64[2];
10305 pu256Dst->au64[3] = pu256Value->au64[3];
10306 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10307 return;
10308 }
10309
10310 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10311 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10312}
10313#endif
10314
10315
10316/**
10317 * Stores a descriptor register (sgdt, sidt).
10318 *
10319 * @returns Strict VBox status code.
10320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10321 * @param cbLimit The limit.
10322 * @param GCPtrBase The base address.
10323 * @param iSegReg The index of the segment register to use for
10324 * this access. The base and limits are checked.
10325 * @param GCPtrMem The address of the guest memory.
10326 */
10327IEM_STATIC VBOXSTRICTRC
10328iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10329{
10330 /*
 10331 * The SIDT and SGDT instructions actually store the data using two
 10332 * independent writes. The instructions do not respond to operand size prefixes.
10333 */
10334 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10335 if (rcStrict == VINF_SUCCESS)
10336 {
10337 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10338 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10339 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10340 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10341 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10342 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10343 else
10344 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10345 }
10346 return rcStrict;
10347}
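

/*
 * Illustrative sketch: the 16-bit operand size case above stores a full dword
 * for the base, forcing the top byte to 0xff on 286-class targets and storing
 * the base as-is on later ones.  The iemExample* name is hypothetical and the
 * block is never compiled.
 */
#if 0 /* example only */
static uint32_t iemExampleSgdtBaseDword(uint32_t uBase, bool f286OrOlder)
{
    return f286OrOlder ? uBase | UINT32_C(0xff000000) : uBase;
}
#endif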
10348
10349
10350/**
10351 * Pushes a word onto the stack.
10352 *
10353 * @returns Strict VBox status code.
10354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10355 * @param u16Value The value to push.
10356 */
10357IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10358{
 10359    /* Decrement the stack pointer. */
10360 uint64_t uNewRsp;
10361 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10362
10363 /* Write the word the lazy way. */
10364 uint16_t *pu16Dst;
10365 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10366 if (rc == VINF_SUCCESS)
10367 {
10368 *pu16Dst = u16Value;
10369 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10370 }
10371
 10372    /* Commit the new RSP value unless an access handler made trouble. */
10373 if (rc == VINF_SUCCESS)
10374 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10375
10376 return rc;
10377}
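

/*
 * Illustrative sketch: a hypothetical caller of the push helpers.  Each helper
 * updates RSP itself once the memory write has succeeded, so the caller only
 * needs to chain the strict status codes.  The iemExample* name and parameters
 * are hypothetical and the block is never compiled.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushTwoWords(PVMCPU pVCpu, uint16_t u16First, uint16_t u16Second)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16First);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16(pVCpu, u16Second);
    return rcStrict;
}
#endif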
10378
10379
10380/**
10381 * Pushes a dword onto the stack.
10382 *
10383 * @returns Strict VBox status code.
10384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10385 * @param u32Value The value to push.
10386 */
10387IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10388{
 10389    /* Decrement the stack pointer. */
10390 uint64_t uNewRsp;
10391 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10392
10393 /* Write the dword the lazy way. */
10394 uint32_t *pu32Dst;
10395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10396 if (rc == VINF_SUCCESS)
10397 {
10398 *pu32Dst = u32Value;
10399 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10400 }
10401
 10402    /* Commit the new RSP value unless an access handler made trouble. */
10403 if (rc == VINF_SUCCESS)
10404 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10405
10406 return rc;
10407}
10408
10409
10410/**
10411 * Pushes a dword segment register value onto the stack.
10412 *
10413 * @returns Strict VBox status code.
10414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10415 * @param u32Value The value to push.
10416 */
10417IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10418{
 10419    /* Decrement the stack pointer. */
10420 uint64_t uNewRsp;
10421 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10422
 10423    /* The Intel docs talk about zero extending the selector register
 10424       value. The actual Intel CPU tested here might be zero extending the value,
 10425       but it still only writes the lower word... */
10426 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
 10427     * happens when crossing an electric page boundary, is the high word checked
10428 * for write accessibility or not? Probably it is. What about segment limits?
10429 * It appears this behavior is also shared with trap error codes.
10430 *
10431 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10432 * ancient hardware when it actually did change. */
10433 uint16_t *pu16Dst;
10434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10435 if (rc == VINF_SUCCESS)
10436 {
10437 *pu16Dst = (uint16_t)u32Value;
10438 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10439 }
10440
 10441    /* Commit the new RSP value unless an access handler made trouble. */
10442 if (rc == VINF_SUCCESS)
10443 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10444
10445 return rc;
10446}
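

/*
 * Illustrative sketch: the net effect of the segment register push above on the
 * 4-byte stack slot.  Space for a dword is reserved, but only the low word is
 * written; the upper word keeps whatever the stack memory already held.  The
 * iemExample* name is hypothetical and the block is never compiled.
 */
#if 0 /* example only */
static void iemExampleSRegPushEffect(uint8_t *pbStackSlot, uint16_t uSel)
{
    pbStackSlot[0] = (uint8_t)(uSel & 0xff);  /* low word written */
    pbStackSlot[1] = (uint8_t)(uSel >> 8);
    /* pbStackSlot[2] and pbStackSlot[3] are deliberately left untouched. */
}
#endif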
10447
10448
10449/**
10450 * Pushes a qword onto the stack.
10451 *
10452 * @returns Strict VBox status code.
10453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10454 * @param u64Value The value to push.
10455 */
10456IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10457{
 10458    /* Decrement the stack pointer. */
10459 uint64_t uNewRsp;
10460 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10461
 10462    /* Write the qword the lazy way. */
10463 uint64_t *pu64Dst;
10464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10465 if (rc == VINF_SUCCESS)
10466 {
10467 *pu64Dst = u64Value;
10468 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10469 }
10470
 10471    /* Commit the new RSP value unless an access handler made trouble. */
10472 if (rc == VINF_SUCCESS)
10473 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10474
10475 return rc;
10476}
10477
10478
10479/**
10480 * Pops a word from the stack.
10481 *
10482 * @returns Strict VBox status code.
10483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10484 * @param pu16Value Where to store the popped value.
10485 */
10486IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10487{
10488 /* Increment the stack pointer. */
10489 uint64_t uNewRsp;
10490 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10491
 10492    /* Read the word the lazy way. */
10493 uint16_t const *pu16Src;
10494 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10495 if (rc == VINF_SUCCESS)
10496 {
10497 *pu16Value = *pu16Src;
10498 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10499
10500 /* Commit the new RSP value. */
10501 if (rc == VINF_SUCCESS)
10502 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10503 }
10504
10505 return rc;
10506}
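

/*
 * Illustrative sketch: a hypothetical caller of the pop helpers.  RSP is only
 * advanced when both the read and the unmap succeeded, so a failed pop leaves
 * the guest stack pointer untouched.  The iemExample* name is hypothetical and
 * the block is never compiled.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePopWord(PVMCPU pVCpu, uint16_t *pu16Dst)
{
    uint16_t u16Value = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16(pVCpu, &u16Value);
    if (rcStrict == VINF_SUCCESS)
        *pu16Dst = u16Value;
    return rcStrict;
}
#endif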
10507
10508
10509/**
10510 * Pops a dword from the stack.
10511 *
10512 * @returns Strict VBox status code.
10513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10514 * @param pu32Value Where to store the popped value.
10515 */
10516IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10517{
10518 /* Increment the stack pointer. */
10519 uint64_t uNewRsp;
10520 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10521
 10522    /* Read the dword the lazy way. */
10523 uint32_t const *pu32Src;
10524 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10525 if (rc == VINF_SUCCESS)
10526 {
10527 *pu32Value = *pu32Src;
10528 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10529
10530 /* Commit the new RSP value. */
10531 if (rc == VINF_SUCCESS)
10532 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10533 }
10534
10535 return rc;
10536}
10537
10538
10539/**
10540 * Pops a qword from the stack.
10541 *
10542 * @returns Strict VBox status code.
10543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10544 * @param pu64Value Where to store the popped value.
10545 */
10546IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10547{
10548 /* Increment the stack pointer. */
10549 uint64_t uNewRsp;
10550 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10551
 10552    /* Read the qword the lazy way. */
10553 uint64_t const *pu64Src;
10554 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10555 if (rc == VINF_SUCCESS)
10556 {
10557 *pu64Value = *pu64Src;
10558 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10559
10560 /* Commit the new RSP value. */
10561 if (rc == VINF_SUCCESS)
10562 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10563 }
10564
10565 return rc;
10566}
10567
10568
10569/**
10570 * Pushes a word onto the stack, using a temporary stack pointer.
10571 *
10572 * @returns Strict VBox status code.
10573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10574 * @param u16Value The value to push.
10575 * @param pTmpRsp Pointer to the temporary stack pointer.
10576 */
10577IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10578{
 10579    /* Decrement the stack pointer. */
10580 RTUINT64U NewRsp = *pTmpRsp;
10581 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10582
10583 /* Write the word the lazy way. */
10584 uint16_t *pu16Dst;
10585 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10586 if (rc == VINF_SUCCESS)
10587 {
10588 *pu16Dst = u16Value;
10589 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10590 }
10591
 10592    /* Commit the new RSP value unless an access handler made trouble. */
10593 if (rc == VINF_SUCCESS)
10594 *pTmpRsp = NewRsp;
10595
10596 return rc;
10597}
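

/*
 * Illustrative sketch: a hypothetical caller of the *Ex helpers.  Several pushes
 * share one temporary RSP which is only written back to the guest context once
 * the whole sequence has worked.  The iemExample* name and parameters are
 * hypothetical and the block is never compiled.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushTwoWordsEx(PVMCPU pVCpu, uint16_t u16First, uint16_t u16Second)
{
    RTUINT64U    TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, u16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, u16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
    return rcStrict;
}
#endif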
10598
10599
10600/**
10601 * Pushes a dword onto the stack, using a temporary stack pointer.
10602 *
10603 * @returns Strict VBox status code.
10604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10605 * @param u32Value The value to push.
10606 * @param pTmpRsp Pointer to the temporary stack pointer.
10607 */
10608IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10609{
 10610    /* Decrement the stack pointer. */
10611 RTUINT64U NewRsp = *pTmpRsp;
10612 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10613
 10614    /* Write the dword the lazy way. */
10615 uint32_t *pu32Dst;
10616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10617 if (rc == VINF_SUCCESS)
10618 {
10619 *pu32Dst = u32Value;
10620 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10621 }
10622
 10623    /* Commit the new RSP value unless an access handler made trouble. */
10624 if (rc == VINF_SUCCESS)
10625 *pTmpRsp = NewRsp;
10626
10627 return rc;
10628}
10629
10630
10631/**
 10632 * Pushes a qword onto the stack, using a temporary stack pointer.
10633 *
10634 * @returns Strict VBox status code.
10635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10636 * @param u64Value The value to push.
10637 * @param pTmpRsp Pointer to the temporary stack pointer.
10638 */
10639IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10640{
 10641    /* Decrement the stack pointer. */
10642 RTUINT64U NewRsp = *pTmpRsp;
10643 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10644
 10645    /* Write the qword the lazy way. */
10646 uint64_t *pu64Dst;
10647 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10648 if (rc == VINF_SUCCESS)
10649 {
10650 *pu64Dst = u64Value;
10651 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10652 }
10653
 10654    /* Commit the new RSP value unless an access handler made trouble. */
10655 if (rc == VINF_SUCCESS)
10656 *pTmpRsp = NewRsp;
10657
10658 return rc;
10659}
10660
10661
10662/**
10663 * Pops a word from the stack, using a temporary stack pointer.
10664 *
10665 * @returns Strict VBox status code.
10666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10667 * @param pu16Value Where to store the popped value.
10668 * @param pTmpRsp Pointer to the temporary stack pointer.
10669 */
10670IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10671{
10672 /* Increment the stack pointer. */
10673 RTUINT64U NewRsp = *pTmpRsp;
10674 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10675
 10676    /* Read the word the lazy way. */
10677 uint16_t const *pu16Src;
10678 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10679 if (rc == VINF_SUCCESS)
10680 {
10681 *pu16Value = *pu16Src;
10682 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10683
10684 /* Commit the new RSP value. */
10685 if (rc == VINF_SUCCESS)
10686 *pTmpRsp = NewRsp;
10687 }
10688
10689 return rc;
10690}
10691
10692
10693/**
10694 * Pops a dword from the stack, using a temporary stack pointer.
10695 *
10696 * @returns Strict VBox status code.
10697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10698 * @param pu32Value Where to store the popped value.
10699 * @param pTmpRsp Pointer to the temporary stack pointer.
10700 */
10701IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10702{
10703 /* Increment the stack pointer. */
10704 RTUINT64U NewRsp = *pTmpRsp;
10705 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10706
 10707    /* Read the dword the lazy way. */
10708 uint32_t const *pu32Src;
10709 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10710 if (rc == VINF_SUCCESS)
10711 {
10712 *pu32Value = *pu32Src;
10713 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10714
10715 /* Commit the new RSP value. */
10716 if (rc == VINF_SUCCESS)
10717 *pTmpRsp = NewRsp;
10718 }
10719
10720 return rc;
10721}
10722
10723
10724/**
10725 * Pops a qword from the stack, using a temporary stack pointer.
10726 *
10727 * @returns Strict VBox status code.
10728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10729 * @param pu64Value Where to store the popped value.
10730 * @param pTmpRsp Pointer to the temporary stack pointer.
10731 */
10732IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10733{
10734 /* Increment the stack pointer. */
10735 RTUINT64U NewRsp = *pTmpRsp;
10736 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10737
10738 /* Read the qword the lazy way. */
10739 uint64_t const *pu64Src;
10740 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10741 if (rcStrict == VINF_SUCCESS)
10742 {
10743 *pu64Value = *pu64Src;
10744 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10745
10746 /* Commit the new RSP value. */
10747 if (rcStrict == VINF_SUCCESS)
10748 *pTmpRsp = NewRsp;
10749 }
10750
10751 return rcStrict;
10752}
10753
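
/*
 * Illustrative sketch (kept out of the build): the *Ex push/pop helpers above
 * take a caller-provided temporary RSP so that a multi-step stack operation
 * can be rolled back simply by not committing it.  The helper name
 * iemExamplePushTwoQwords below is hypothetical.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePushTwoQwords(PVMCPU pVCpu, uint64_t u64First, uint64_t u64Second)
{
    /* Work on a copy of RSP; the guest context is untouched until we commit. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPushU64Ex(pVCpu, u64First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU64Ex(pVCpu, u64Second, &TmpRsp);

    /* Commit the new RSP only if both pushes succeeded. */
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
    return rcStrict;
}
#endif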
10754
10755/**
10756 * Begin a special stack push (used by interrupts, exceptions and such).
10757 *
10758 * This will raise \#SS or \#PF if appropriate.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10762 * @param cbMem The number of bytes to push onto the stack.
10763 * @param ppvMem Where to return the pointer to the stack memory.
10764 * As with the other memory functions this could be
10765 * direct access or bounce buffered access, so
10766 * don't commit register until the commit call
10767 * succeeds.
10768 * @param puNewRsp Where to return the new RSP value. This must be
10769 * passed unchanged to
10770 * iemMemStackPushCommitSpecial().
10771 */
10772IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10773{
10774 Assert(cbMem < UINT8_MAX);
10775 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10776 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10777}
10778
10779
10780/**
10781 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10782 *
10783 * This will update the rSP.
10784 *
10785 * @returns Strict VBox status code.
10786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10787 * @param pvMem The pointer returned by
10788 * iemMemStackPushBeginSpecial().
10789 * @param uNewRsp The new RSP value returned by
10790 * iemMemStackPushBeginSpecial().
10791 */
10792IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10793{
10794 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10795 if (rcStrict == VINF_SUCCESS)
10796 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10797 return rcStrict;
10798}
10799
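
/*
 * Illustrative sketch (not compiled): how the begin/commit pair above is
 * typically used when pushing an exception/interrupt frame.  The frame
 * contents (uOldEip, uOldCs, uOldEfl) are placeholders for this example.
 */
#if 0
    uint64_t     uNewRsp;
    uint32_t    *pu32Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                        (void **)&pu32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pu32Frame[2] = uOldEfl;   /* hypothetical frame layout: EFLAGS, CS, EIP */
    pu32Frame[1] = uOldCs;
    pu32Frame[0] = uOldEip;
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif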
10800
10801/**
10802 * Begin a special stack pop (used by iret, retf and such).
10803 *
10804 * This will raise \#SS or \#PF if appropriate.
10805 *
10806 * @returns Strict VBox status code.
10807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10808 * @param cbMem The number of bytes to pop from the stack.
10809 * @param ppvMem Where to return the pointer to the stack memory.
10810 * @param puNewRsp Where to return the new RSP value. This must be
10811 * assigned to CPUMCTX::rsp manually some time
10812 * after iemMemStackPopDoneSpecial() has been
10813 * called.
10814 */
10815IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10816{
10817 Assert(cbMem < UINT8_MAX);
10818 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10819 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10820}
10821
10822
10823/**
10824 * Continue a special stack pop (used by iret and retf).
10825 *
10826 * This will raise \#SS or \#PF if appropriate.
10827 *
10828 * @returns Strict VBox status code.
10829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10830 * @param cbMem The number of bytes to pop from the stack.
10831 * @param ppvMem Where to return the pointer to the stack memory.
10832 * @param puNewRsp Where to return the new RSP value. This must be
10833 * assigned to CPUMCTX::rsp manually some time
10834 * after iemMemStackPopDoneSpecial() has been
10835 * called.
10836 */
10837IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10838{
10839 Assert(cbMem < UINT8_MAX);
10840 RTUINT64U NewRsp;
10841 NewRsp.u = *puNewRsp;
10842 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10843 *puNewRsp = NewRsp.u;
10844 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10845}
10846
10847
10848/**
10849 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10850 * iemMemStackPopContinueSpecial).
10851 *
10852 * The caller will manually commit the rSP.
10853 *
10854 * @returns Strict VBox status code.
10855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10856 * @param pvMem The pointer returned by
10857 * iemMemStackPopBeginSpecial() or
10858 * iemMemStackPopContinueSpecial().
10859 */
10860IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10861{
10862 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10863}
10864
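
/*
 * Illustrative sketch (not compiled): the begin/done pattern above as used by
 * iret-like code.  The 16-bit IP/CS/FLAGS frame layout is just an example,
 * and the validation step is elided.
 */
#if 0
    uint64_t        uNewRsp;
    uint16_t const *pu16Frame;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint16_t),
                                                       (void const **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const uNewIp    = pu16Frame[0];
    uint16_t const uNewCs    = pu16Frame[1];
    uint16_t const uNewFlags = pu16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate uNewCs/uNewIp/uNewFlags ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the caller commits RSP manually */
#endif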
10865
10866/**
10867 * Fetches a system table byte.
10868 *
10869 * @returns Strict VBox status code.
10870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10871 * @param pbDst Where to return the byte.
10872 * @param iSegReg The index of the segment register to use for
10873 * this access. The base and limits are checked.
10874 * @param GCPtrMem The address of the guest memory.
10875 */
10876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10877{
10878 /* The lazy approach for now... */
10879 uint8_t const *pbSrc;
10880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10881 if (rc == VINF_SUCCESS)
10882 {
10883 *pbDst = *pbSrc;
10884 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10885 }
10886 return rc;
10887}
10888
10889
10890/**
10891 * Fetches a system table word.
10892 *
10893 * @returns Strict VBox status code.
10894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10895 * @param pu16Dst Where to return the word.
10896 * @param iSegReg The index of the segment register to use for
10897 * this access. The base and limits are checked.
10898 * @param GCPtrMem The address of the guest memory.
10899 */
10900IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10901{
10902 /* The lazy approach for now... */
10903 uint16_t const *pu16Src;
10904 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10905 if (rc == VINF_SUCCESS)
10906 {
10907 *pu16Dst = *pu16Src;
10908 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10909 }
10910 return rc;
10911}
10912
10913
10914/**
10915 * Fetches a system table dword.
10916 *
10917 * @returns Strict VBox status code.
10918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10919 * @param pu32Dst Where to return the dword.
10920 * @param iSegReg The index of the segment register to use for
10921 * this access. The base and limits are checked.
10922 * @param GCPtrMem The address of the guest memory.
10923 */
10924IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10925{
10926 /* The lazy approach for now... */
10927 uint32_t const *pu32Src;
10928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10929 if (rc == VINF_SUCCESS)
10930 {
10931 *pu32Dst = *pu32Src;
10932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10933 }
10934 return rc;
10935}
10936
10937
10938/**
10939 * Fetches a system table qword.
10940 *
10941 * @returns Strict VBox status code.
10942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10943 * @param pu64Dst Where to return the qword.
10944 * @param iSegReg The index of the segment register to use for
10945 * this access. The base and limits are checked.
10946 * @param GCPtrMem The address of the guest memory.
10947 */
10948IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10949{
10950 /* The lazy approach for now... */
10951 uint64_t const *pu64Src;
10952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10953 if (rc == VINF_SUCCESS)
10954 {
10955 *pu64Dst = *pu64Src;
10956 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10957 }
10958 return rc;
10959}
10960
10961
10962/**
10963 * Fetches a descriptor table entry with caller specified error code.
10964 *
10965 * @returns Strict VBox status code.
10966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10967 * @param pDesc Where to return the descriptor table entry.
10968 * @param uSel The selector which table entry to fetch.
10969 * @param uXcpt The exception to raise on table lookup error.
10970 * @param uErrorCode The error code associated with the exception.
10971 */
10972IEM_STATIC VBOXSTRICTRC
10973iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10974{
10975 AssertPtr(pDesc);
10976 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10977
10978 /** @todo did the 286 require all 8 bytes to be accessible? */
10979 /*
10980 * Get the selector table base and check bounds.
10981 */
10982 RTGCPTR GCPtrBase;
10983 if (uSel & X86_SEL_LDT)
10984 {
10985 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10986 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10987 {
10988 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10989 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10990 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10991 uErrorCode, 0);
10992 }
10993
10994 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10995 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10996 }
10997 else
10998 {
10999 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11000 {
11001 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11002 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11003 uErrorCode, 0);
11004 }
11005 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11006 }
11007
11008 /*
11009 * Read the legacy descriptor and maybe the long mode extensions if
11010 * required.
11011 */
11012 VBOXSTRICTRC rcStrict;
11013 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11014 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11015 else
11016 {
11017 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11018 if (rcStrict == VINF_SUCCESS)
11019 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11020 if (rcStrict == VINF_SUCCESS)
11021 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11022 if (rcStrict == VINF_SUCCESS)
11023 pDesc->Legacy.au16[3] = 0;
11024 else
11025 return rcStrict;
11026 }
11027
11028 if (rcStrict == VINF_SUCCESS)
11029 {
11030 if ( !IEM_IS_LONG_MODE(pVCpu)
11031 || pDesc->Legacy.Gen.u1DescType)
11032 pDesc->Long.au64[1] = 0;
11033 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11034 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11035 else
11036 {
11037 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11038 /** @todo is this the right exception? */
11039 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11040 }
11041 }
11042 return rcStrict;
11043}
11044
11045
11046/**
11047 * Fetches a descriptor table entry.
11048 *
11049 * @returns Strict VBox status code.
11050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11051 * @param pDesc Where to return the descriptor table entry.
11052 * @param uSel The selector which table entry to fetch.
11053 * @param uXcpt The exception to raise on table lookup error.
11054 */
11055IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11056{
11057 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11058}
11059
11060
11061/**
11062 * Fakes a long mode stack selector for SS = 0.
11063 *
11064 * @param pDescSs Where to return the fake stack descriptor.
11065 * @param uDpl The DPL we want.
11066 */
11067IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11068{
11069 pDescSs->Long.au64[0] = 0;
11070 pDescSs->Long.au64[1] = 0;
11071 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11072 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11073 pDescSs->Long.Gen.u2Dpl = uDpl;
11074 pDescSs->Long.Gen.u1Present = 1;
11075 pDescSs->Long.Gen.u1Long = 1;
11076}
11077
11078
11079/**
11080 * Marks the selector descriptor as accessed (only non-system descriptors).
11081 *
11082 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11083 * will therefore skip the limit checks.
11084 *
11085 * @returns Strict VBox status code.
11086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11087 * @param uSel The selector.
11088 */
11089IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11090{
11091 /*
11092 * Get the selector table base and calculate the entry address.
11093 */
11094 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11095 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11096 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11097 GCPtr += uSel & X86_SEL_MASK;
11098
11099 /*
11100 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11101 * ugly stuff to avoid this. This will make sure it's an atomic access
11102 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11103 */
11104 VBOXSTRICTRC rcStrict;
11105 uint32_t volatile *pu32;
11106 if ((GCPtr & 3) == 0)
11107 {
11108 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11109 GCPtr += 2 + 2;
11110 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11111 if (rcStrict != VINF_SUCCESS)
11112 return rcStrict;
11113 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11114 }
11115 else
11116 {
11117 /* The misaligned GDT/LDT case, map the whole thing. */
11118 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11119 if (rcStrict != VINF_SUCCESS)
11120 return rcStrict;
11121 switch ((uintptr_t)pu32 & 3)
11122 {
11123 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11124 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11125 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11126 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11127 }
11128 }
11129
11130 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11131}
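
/*
 * Illustrative sketch (not compiled): the usual pairing of iemMemFetchSelDesc
 * with iemMemMarkSelDescAccessed when loading a data/code selector; uSel and
 * the surrounding presence/type/DPL validation are assumed to exist in the
 * caller.
 */
#if 0
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... presence, type and DPL checks go here ... */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }
#endif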
11132
11133/** @} */
11134
11135
11136/*
11137 * Include the C/C++ implementation of instruction.
11138 */
11139#include "IEMAllCImpl.cpp.h"
11140
11141
11142
11143/** @name "Microcode" macros.
11144 *
11145 * The idea is that we should be able to use the same code both to interpret
11146 * instructions and to drive a recompiler. Thus this obfuscation.
11147 *
11148 * @{
11149 */
11150#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11151#define IEM_MC_END() }
11152#define IEM_MC_PAUSE() do {} while (0)
11153#define IEM_MC_CONTINUE() do {} while (0)
11154
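/*
 * Illustrative sketch (not compiled): what a typical instruction body built
 * from these IEM_MC_* macros looks like, here a register-to-register 32-bit
 * move; uDstReg/uSrcReg stand in for decoded ModR/M register indices.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, uSrcReg);
    IEM_MC_STORE_GREG_U32(uDstReg, u32Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif
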
11155/** Internal macro. */
11156#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11157 do \
11158 { \
11159 VBOXSTRICTRC rcStrict2 = a_Expr; \
11160 if (rcStrict2 != VINF_SUCCESS) \
11161 return rcStrict2; \
11162 } while (0)
11163
11164
11165#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11166#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11167#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11168#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11169#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11170#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11171#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11172#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11173#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11174 do { \
11175 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11179 do { \
11180 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11181 return iemRaiseDeviceNotAvailable(pVCpu); \
11182 } while (0)
11183#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11184 do { \
11185 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11186 return iemRaiseMathFault(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11189 do { \
11190 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11191 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11192 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11195 return iemRaiseDeviceNotAvailable(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11198 do { \
11199 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11200 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11207 do { \
11208 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11209 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11210 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11213 return iemRaiseDeviceNotAvailable(pVCpu); \
11214 } while (0)
11215#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11216 do { \
11217 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11218 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11219 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11220 return iemRaiseUndefinedOpcode(pVCpu); \
11221 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11222 return iemRaiseDeviceNotAvailable(pVCpu); \
11223 } while (0)
11224#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11225 do { \
11226 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11227 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11228 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11229 return iemRaiseUndefinedOpcode(pVCpu); \
11230 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11231 return iemRaiseDeviceNotAvailable(pVCpu); \
11232 } while (0)
11233#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11234 do { \
11235 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11236 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11237 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11238 return iemRaiseUndefinedOpcode(pVCpu); \
11239 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11240 return iemRaiseDeviceNotAvailable(pVCpu); \
11241 } while (0)
11242#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11243 do { \
11244 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11246 return iemRaiseUndefinedOpcode(pVCpu); \
11247 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11248 return iemRaiseDeviceNotAvailable(pVCpu); \
11249 } while (0)
11250#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11251 do { \
11252 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11253 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11254 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11255 return iemRaiseUndefinedOpcode(pVCpu); \
11256 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11257 return iemRaiseDeviceNotAvailable(pVCpu); \
11258 } while (0)
11259#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11260 do { \
11261 if (pVCpu->iem.s.uCpl != 0) \
11262 return iemRaiseGeneralProtectionFault0(pVCpu); \
11263 } while (0)
11264#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11265 do { \
11266 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11267 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11268 } while (0)
11269#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11270 do { \
11271 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11272 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11273 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11274 return iemRaiseUndefinedOpcode(pVCpu); \
11275 } while (0)
11276#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11277 do { \
11278 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11279 return iemRaiseGeneralProtectionFault0(pVCpu); \
11280 } while (0)
11281
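/*
 * Illustrative sketch (not compiled): the IEM_MC_MAYBE_RAISE_* checks above
 * are placed at the top of an instruction body, before any guest state is
 * touched, e.g. for an SSE2 instruction:
 */
#if 0
    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
    /* ... fetch operands, invoke the worker, IEM_MC_ADVANCE_RIP() ... */
    IEM_MC_END();
#endif
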
11282
11283#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11284#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11285#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11286#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11287#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11288#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11289#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11290 uint32_t a_Name; \
11291 uint32_t *a_pName = &a_Name
11292#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11293 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11294
11295#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11296#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11297
11298#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11315#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11316 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11317 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11318 } while (0)
11319#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11320 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11321 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11322 } while (0)
11323#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11324 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11325 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11326 } while (0)
11327/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11328#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11329 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11330 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11331 } while (0)
11332#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11333 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11334 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11335 } while (0)
11336/** @note Not for IOPL or IF testing or modification. */
11337#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11338#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11339#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11340#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11341
11342#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11343#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11344#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11345#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11346#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11347#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11348#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11349#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11350#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11351#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11352/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11353#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11354 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11355 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11356 } while (0)
11357#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11358 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11359 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11360 } while (0)
11361#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11362 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11363
11364
11365#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11366#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11367/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11368 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11369#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11370#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11371/** @note Not for IOPL or IF testing or modification. */
11372#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11373
11374#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11375#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11376#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11377 do { \
11378 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11379 *pu32Reg += (a_u32Value); \
11380 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11381 } while (0)
11382#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11383
11384#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11385#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11386#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11387 do { \
11388 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11389 *pu32Reg -= (a_u32Value); \
11390 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11391 } while (0)
11392#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11393#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11394
11395#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11396#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11397#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11398#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11399#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11400#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11401#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11402
11403#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11404#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11405#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11406#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11407
11408#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11409#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11410#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11411
11412#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11413#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11414#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11415
11416#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11417#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11418#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11419
11420#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11421#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11422#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11423
11424#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11425
11426#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11427
11428#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11429#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11430#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11431 do { \
11432 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11433 *pu32Reg &= (a_u32Value); \
11434 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11435 } while (0)
11436#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11437
11438#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11439#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11440#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11441 do { \
11442 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11443 *pu32Reg |= (a_u32Value); \
11444 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11445 } while (0)
11446#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11447
11448
11449/** @note Not for IOPL or IF modification. */
11450#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11451/** @note Not for IOPL or IF modification. */
11452#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11453/** @note Not for IOPL or IF modification. */
11454#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11455
11456#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11457
11458/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11459#define IEM_MC_FPU_TO_MMX_MODE() do { \
11460 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11461 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11462 } while (0)
11463
11464/** Switches the FPU state from MMX mode (FTW=0xffff). */
11465#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11466 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11467 } while (0)
11468
11469#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11470 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11471#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11472 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11473#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11474 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11475 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11476 } while (0)
11477#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11478 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11479 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11480 } while (0)
11481#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11482 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11483#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11484 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11485#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11486 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11487
11488#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11489 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11490 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11491 } while (0)
11492#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11493 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11494#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11495 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11496#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11497 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11498#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11499 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11500 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11501 } while (0)
11502#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11503 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11504#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11505 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11506 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11507 } while (0)
11508#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11509 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11510#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11511 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11512 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11513 } while (0)
11514#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11515 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11516#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11517 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11518#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11519 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11520#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11521 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11522#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11523 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11524 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11525 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11526 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11527 } while (0)
11528
11529#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11530 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11531 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11532 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11533 } while (0)
11534#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11535 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11536 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11537 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11538 } while (0)
11539#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11540 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11541 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11542 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11543 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11544 } while (0)
11545#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11546 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11547 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11548 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11549 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11550 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11551 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11552 } while (0)
11553
11554#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11555#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11556 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11557 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11559 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11560 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11561 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11562 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11563 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11564 } while (0)
11565#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11566 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11567 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11569 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11571 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11572 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11573 } while (0)
11574#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11575 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11576 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11578 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11580 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11581 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11582 } while (0)
11583#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11584 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11585 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11587 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11589 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11590 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11591 } while (0)
11592
11593#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11594 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11595#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11596 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11597#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11598 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11599#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11600 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11601 uintptr_t const iYRegTmp = (a_iYReg); \
11602 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11603 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11604 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11605 } while (0)
11606
11607#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11608 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11609 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11610 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11611 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11612 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11613 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11614 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11615 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11616 } while (0)
11617#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11618 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11619 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11620 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11621 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11622 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11623 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11624 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11625 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11626 } while (0)
11627#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11628 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11629 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11630 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11631 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11632 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11633 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11634 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11635 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11636 } while (0)
11637
11638#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11639 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11640 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11641 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11642 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11643 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11644 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11645 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11646 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11647 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11648 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11649 } while (0)
11650#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11651 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11652 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11653 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11654 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11655 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11656 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11657 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11658 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11659 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11660 } while (0)
11661#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11662 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11663 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11664 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11665 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11666 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11667 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11668 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11669 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11670 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11671 } while (0)
11672#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11673 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11674 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11675 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11676 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11677 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11678 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11679 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11680 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11681 } while (0)
11682
11683#ifndef IEM_WITH_SETJMP
11684# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11686# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11688# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11690#else
11691# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11692 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11693# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11694 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11695# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11696 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11697#endif
11698
11699#ifndef IEM_WITH_SETJMP
11700# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11702# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11703 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11704# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11706#else
11707# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11708 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11709# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11710 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11711# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11712 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11713#endif
11714
11715#ifndef IEM_WITH_SETJMP
11716# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11718# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11720# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11722#else
11723# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11724 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11726 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11727# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11728 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11729#endif
11730
11731#ifdef SOME_UNUSED_FUNCTION
11732# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11734#endif
11735
11736#ifndef IEM_WITH_SETJMP
11737# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11739# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11741# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11743# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11745#else
11746# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11749 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11750# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754#endif
11755
11756#ifndef IEM_WITH_SETJMP
11757# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11759# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11760 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11763#else
11764# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11765 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11766# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11767 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11768# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11769 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11770#endif
11771
11772#ifndef IEM_WITH_SETJMP
11773# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11775# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11776 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11777#else
11778# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11779 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11780# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11781 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11782#endif
11783
11784#ifndef IEM_WITH_SETJMP
11785# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11787# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11789#else
11790# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11791 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11792# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11793 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11794#endif
11795
11796
11797
11798#ifndef IEM_WITH_SETJMP
11799# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11800 do { \
11801 uint8_t u8Tmp; \
11802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11803 (a_u16Dst) = u8Tmp; \
11804 } while (0)
11805# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11806 do { \
11807 uint8_t u8Tmp; \
11808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11809 (a_u32Dst) = u8Tmp; \
11810 } while (0)
11811# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint8_t u8Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u64Dst) = u8Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint16_t u16Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u32Dst) = u16Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint16_t u16Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u64Dst) = u16Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint32_t u32Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u64Dst) = u32Tmp; \
11834 } while (0)
11835#else /* IEM_WITH_SETJMP */
11836# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11837 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11838# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11839 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11840# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11841 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11842# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11843 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11844# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848#endif /* IEM_WITH_SETJMP */
11849
11850#ifndef IEM_WITH_SETJMP
11851# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11852 do { \
11853 uint8_t u8Tmp; \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11855 (a_u16Dst) = (int8_t)u8Tmp; \
11856 } while (0)
11857# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11858 do { \
11859 uint8_t u8Tmp; \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11861 (a_u32Dst) = (int8_t)u8Tmp; \
11862 } while (0)
11863# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11864 do { \
11865 uint8_t u8Tmp; \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11867 (a_u64Dst) = (int8_t)u8Tmp; \
11868 } while (0)
11869# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11870 do { \
11871 uint16_t u16Tmp; \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11873 (a_u32Dst) = (int16_t)u16Tmp; \
11874 } while (0)
11875# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11876 do { \
11877 uint16_t u16Tmp; \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11879 (a_u64Dst) = (int16_t)u16Tmp; \
11880 } while (0)
11881# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11882 do { \
11883 uint32_t u32Tmp; \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11885 (a_u64Dst) = (int32_t)u32Tmp; \
11886 } while (0)
11887#else /* IEM_WITH_SETJMP */
11888# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11889 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11890# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11891 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11892# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11893 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11894# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11895 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11896# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11897 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11898# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11899 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11900#endif /* IEM_WITH_SETJMP */
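
/* A minimal, illustrative sketch of how the zero/sign extending fetch
 * bindings above are consumed by an opcode handler (roughly the memory form
 * of MOVZX Gw,Eb). The statement order is simplified and the register index
 * expression is only an assumption patterned after the decoder templates;
 * the real handlers live in the IEMAllInstructions*.cpp.h includes.
 *
 * @code
 *  IEM_MC_BEGIN(0, 2);
 *  IEM_MC_LOCAL(uint16_t, u16Value);
 *  IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *  IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *  // Expands to iemMemFetchDataU8 or iemMemFetchDataU8Jmp depending on IEM_WITH_SETJMP:
 *  IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *  IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */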
11901
11902#ifndef IEM_WITH_SETJMP
11903# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11905# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11907# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11908 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11909# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11911#else
11912# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11913 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11914# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11915 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11916# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11917 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11918# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11919 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11920#endif
11921
11922#ifndef IEM_WITH_SETJMP
11923# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11925# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11926 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11927# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11928 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11929# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11931#else
11932# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11933 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11934# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11935 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11936# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11937 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11938# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11939 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11940#endif
11941
11942#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11943#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11944#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11945#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11946#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11947#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11948#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11949 do { \
11950 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11951 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11952 } while (0)
11953
11954#ifndef IEM_WITH_SETJMP
11955# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11956 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11957# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11959#else
11960# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11961 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11962# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11963 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11964#endif
11965
11966#ifndef IEM_WITH_SETJMP
11967# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11968 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11969# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11970 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11971#else
11972# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11973 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11974# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11975 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11976#endif
11977
11978
11979#define IEM_MC_PUSH_U16(a_u16Value) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11981#define IEM_MC_PUSH_U32(a_u32Value) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11983#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11984 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11985#define IEM_MC_PUSH_U64(a_u64Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11987
11988#define IEM_MC_POP_U16(a_pu16Value) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11990#define IEM_MC_POP_U32(a_pu32Value) \
11991 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11992#define IEM_MC_POP_U64(a_pu64Value) \
11993 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11994
11995/** Maps guest memory for direct or bounce buffered access.
11996 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11997 * @remarks May return.
11998 */
11999#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12000 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12001
12002/** Maps guest memory for direct or bounce buffered access.
12003 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12004 * @remarks May return.
12005 */
12006#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12007 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12008
12009/** Commits the memory and unmaps the guest memory.
12010 * @remarks May return.
12011 */
12012#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12013 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12014
12015/** Commits the memory and unmaps the guest memory, unless the FPU status word
12016 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
12017 * would cause FLD not to store.
12018 *
12019 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12020 * store, while \#P will not.
12021 *
12022 * @remarks May in theory return - for now.
12023 */
12024#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12025 do { \
12026 if ( !(a_u16FSW & X86_FSW_ES) \
12027 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12028 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12029 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12030 } while (0)
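
/* A rough, illustrative sketch of the FST m32 style pattern the conditional
 * commit macro above exists for: the destination is mapped, the assembly
 * worker produces the value and an FSW, and the commit is skipped when the
 * FSW/FCW combination indicates a store-blocking exception. The worker name
 * iemAImpl_fst_r80_to_r32 and the trimmed statement sequence are assumptions
 * for illustration, not a verbatim copy of the real decoder.
 *
 * @code
 *  IEM_MC_BEGIN(3, 2);
 *  IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *  IEM_MC_LOCAL(uint16_t, u16Fsw);
 *  IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
 *  IEM_MC_ARG(PRTFLOAT32U, pr32Dst, 1);
 *  IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *  IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *  IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 1 /*arg*/);
 *  IEM_MC_PREPARE_FPU_USAGE();
 *  IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
 *      IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *  IEM_MC_ELSE()
 *      IEM_MC_IF_FCW_IM()
 *          IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
 *          IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
 *      IEM_MC_ENDIF();
 *      IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
 *  IEM_MC_ENDIF();
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */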
12031
12032/** Calculates the effective address from R/M. */
12033#ifndef IEM_WITH_SETJMP
12034# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12035 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12036#else
12037# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12038 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12039#endif
12040
12041#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12042#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12043#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12044#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12045#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12046#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12047#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
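
/* An illustrative sketch combining IEM_MC_CALC_RM_EFF_ADDR, the memory
 * mapping macros and IEM_MC_CALL_VOID_AIMPL_3 for a typical read-modify-write
 * ALU memory form (roughly ADD Ev,Gv with 32-bit operand size). The worker
 * name iemAImpl_add_u32 follows the usual naming pattern but the sequence is
 * a simplified assumption; the real templates also cover the register form
 * and the lock prefix.
 *
 * @code
 *  IEM_MC_BEGIN(3, 2);
 *  IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *  IEM_MC_ARG(uint32_t,   u32Src,  1);
 *  IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *  IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *  IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *  IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
 *  IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *  IEM_MC_REF_EFLAGS(pEFlags);
 *  IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *  IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */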
12048
12049/**
12050 * Defers the rest of the instruction emulation to a C implementation routine
12051 * and returns, only taking the standard parameters.
12052 *
12053 * @param a_pfnCImpl The pointer to the C routine.
12054 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12055 */
12056#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12057
12058/**
12059 * Defers the rest of instruction emulation to a C implementation routine and
12060 * returns, taking one argument in addition to the standard ones.
12061 *
12062 * @param a_pfnCImpl The pointer to the C routine.
12063 * @param a0 The argument.
12064 */
12065#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12066
12067/**
12068 * Defers the rest of the instruction emulation to a C implementation routine
12069 * and returns, taking two arguments in addition to the standard ones.
12070 *
12071 * @param a_pfnCImpl The pointer to the C routine.
12072 * @param a0 The first extra argument.
12073 * @param a1 The second extra argument.
12074 */
12075#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12076
12077/**
12078 * Defers the rest of the instruction emulation to a C implementation routine
12079 * and returns, taking three arguments in addition to the standard ones.
12080 *
12081 * @param a_pfnCImpl The pointer to the C routine.
12082 * @param a0 The first extra argument.
12083 * @param a1 The second extra argument.
12084 * @param a2 The third extra argument.
12085 */
12086#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12087
12088/**
12089 * Defers the rest of the instruction emulation to a C implementation routine
12090 * and returns, taking four arguments in addition to the standard ones.
12091 *
12092 * @param a_pfnCImpl The pointer to the C routine.
12093 * @param a0 The first extra argument.
12094 * @param a1 The second extra argument.
12095 * @param a2 The third extra argument.
12096 * @param a3 The fourth extra argument.
12097 */
12098#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12099
12100/**
12101 * Defers the rest of the instruction emulation to a C implementation routine
12102 * and returns, taking five arguments in addition to the standard ones.
12103 *
12104 * @param a_pfnCImpl The pointer to the C routine.
12105 * @param a0 The first extra argument.
12106 * @param a1 The second extra argument.
12107 * @param a2 The third extra argument.
12108 * @param a3 The fourth extra argument.
12109 * @param a4 The fifth extra argument.
12110 */
12111#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12112
12113/**
12114 * Defers the entire instruction emulation to a C implementation routine and
12115 * returns, only taking the standard parameters.
12116 *
12117 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12118 *
12119 * @param a_pfnCImpl The pointer to the C routine.
12120 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12121 */
12122#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12123
12124/**
12125 * Defers the entire instruction emulation to a C implementation routine and
12126 * returns, taking one argument in addition to the standard ones.
12127 *
12128 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12129 *
12130 * @param a_pfnCImpl The pointer to the C routine.
12131 * @param a0 The argument.
12132 */
12133#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12134
12135/**
12136 * Defers the entire instruction emulation to a C implementation routine and
12137 * returns, taking two arguments in addition to the standard ones.
12138 *
12139 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12140 *
12141 * @param a_pfnCImpl The pointer to the C routine.
12142 * @param a0 The first extra argument.
12143 * @param a1 The second extra argument.
12144 */
12145#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12146
12147/**
12148 * Defers the entire instruction emulation to a C implementation routine and
12149 * returns, taking three arguments in addition to the standard ones.
12150 *
12151 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12152 *
12153 * @param a_pfnCImpl The pointer to the C routine.
12154 * @param a0 The first extra argument.
12155 * @param a1 The second extra argument.
12156 * @param a2 The third extra argument.
12157 */
12158#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
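
/* A minimal sketch of the difference between the two hand-off styles above,
 * using placeholder iemCImpl_* names: IEM_MC_DEFER_TO_CIMPL_* is used on its
 * own when the C routine does everything, while IEM_MC_CALL_CIMPL_* sits at
 * the end of an IEM_MC_BEGIN/IEM_MC_END block once some decoding (arguments,
 * effective address, etc.) has already been emitted.
 *
 * @code
 *  // Entire instruction handled in C, no IEM_MC block:
 *  return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeSimpleInstruction);
 *
 *  // Arguments gathered via IEM_MC statements, remainder done in C:
 *  IEM_MC_BEGIN(1, 0);
 *  IEM_MC_ARG(uint16_t, u16Value, 0);
 *  IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *  IEM_MC_CALL_CIMPL_1(iemCImpl_SomeOtherInstruction, u16Value);
 *  IEM_MC_END();
 * @endcode
 */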
12159
12160/**
12161 * Calls an FPU assembly implementation taking one visible argument.
12162 *
12163 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12164 * @param a0 The first extra argument.
12165 */
12166#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12167 do { \
12168 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12169 } while (0)
12170
12171/**
12172 * Calls an FPU assembly implementation taking two visible arguments.
12173 *
12174 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12175 * @param a0 The first extra argument.
12176 * @param a1 The second extra argument.
12177 */
12178#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12179 do { \
12180 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12181 } while (0)
12182
12183/**
12184 * Calls an FPU assembly implementation taking three visible arguments.
12185 *
12186 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12187 * @param a0 The first extra argument.
12188 * @param a1 The second extra argument.
12189 * @param a2 The third extra argument.
12190 */
12191#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12192 do { \
12193 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12194 } while (0)
12195
12196#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12197 do { \
12198 (a_FpuData).FSW = (a_FSW); \
12199 (a_FpuData).r80Result = *(a_pr80Value); \
12200 } while (0)
12201
12202/** Pushes FPU result onto the stack. */
12203#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12204 iemFpuPushResult(pVCpu, &a_FpuData)
12205/** Pushes FPU result onto the stack and sets the FPUDP. */
12206#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12207 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12208
12209/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12210#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12211 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12212
12213/** Stores FPU result in a stack register. */
12214#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12215 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12216/** Stores FPU result in a stack register and pops the stack. */
12217#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12218 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12219/** Stores FPU result in a stack register and sets the FPUDP. */
12220#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12221 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12222/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12223 * stack. */
12224#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12225 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12226
12227/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12228#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12229 iemFpuUpdateOpcodeAndIp(pVCpu)
12230/** Free a stack register (for FFREE and FFREEP). */
12231#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12232 iemFpuStackFree(pVCpu, a_iStReg)
12233/** Increment the FPU stack pointer. */
12234#define IEM_MC_FPU_STACK_INC_TOP() \
12235 iemFpuStackIncTop(pVCpu)
12236/** Decrement the FPU stack pointer. */
12237#define IEM_MC_FPU_STACK_DEC_TOP() \
12238 iemFpuStackDecTop(pVCpu)
12239
12240/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12241#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12242 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12243/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12244#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12245 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12246/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12247#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12248 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12249/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12250#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12251 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12252/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12253 * stack. */
12254#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12255 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12256/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12257#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12258 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12259
12260/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12261#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12262 iemFpuStackUnderflow(pVCpu, a_iStDst)
12263/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12264 * stack. */
12265#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12266 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12267/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12268 * FPUDS. */
12269#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12270 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12271/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12272 * FPUDS. Pops stack. */
12273#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12274 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12275/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12276 * stack twice. */
12277#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12278 iemFpuStackUnderflowThenPopPop(pVCpu)
12279/** Raises a FPU stack underflow exception for an instruction pushing a result
12280 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12281#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12282 iemFpuStackPushUnderflow(pVCpu)
12283/** Raises a FPU stack underflow exception for an instruction pushing a result
12284 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12285#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12286 iemFpuStackPushUnderflowTwo(pVCpu)
12287
12288/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12289 * FPUIP, FPUCS and FOP. */
12290#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12291 iemFpuStackPushOverflow(pVCpu)
12292/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12293 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12294#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12295 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12296/** Prepares for using the FPU state.
12297 * Ensures that we can use the host FPU in the current context (RC+R0).
12298 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12299#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12300/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12301#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12302/** Actualizes the guest FPU state so it can be accessed and modified. */
12303#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12304
12305/** Prepares for using the SSE state.
12306 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12307 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12308#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12309/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12310#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12311/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12312#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12313
12314/** Prepares for using the AVX state.
12315 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12316 * Ensures the guest AVX state in the CPUMCTX is up to date.
12317 * @note This will include the AVX512 state too when support for it is added
12318 * due to the zero-extending feature of VEX instructions. */
12319#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12320/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12321#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12322/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12323#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12324
12325/**
12326 * Calls an MMX assembly implementation taking two visible arguments.
12327 *
12328 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12329 * @param a0 The first extra argument.
12330 * @param a1 The second extra argument.
12331 */
12332#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12333 do { \
12334 IEM_MC_PREPARE_FPU_USAGE(); \
12335 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12336 } while (0)
12337
12338/**
12339 * Calls an MMX assembly implementation taking three visible arguments.
12340 *
12341 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12342 * @param a0 The first extra argument.
12343 * @param a1 The second extra argument.
12344 * @param a2 The third extra argument.
12345 */
12346#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12347 do { \
12348 IEM_MC_PREPARE_FPU_USAGE(); \
12349 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12350 } while (0)
12351
12352
12353/**
12354 * Calls an SSE assembly implementation taking two visible arguments.
12355 *
12356 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12357 * @param a0 The first extra argument.
12358 * @param a1 The second extra argument.
12359 */
12360#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12361 do { \
12362 IEM_MC_PREPARE_SSE_USAGE(); \
12363 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12364 } while (0)
12365
12366/**
12367 * Calls an SSE assembly implementation taking three visible arguments.
12368 *
12369 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12370 * @param a0 The first extra argument.
12371 * @param a1 The second extra argument.
12372 * @param a2 The third extra argument.
12373 */
12374#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12375 do { \
12376 IEM_MC_PREPARE_SSE_USAGE(); \
12377 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12378 } while (0)
12379
12380
12381/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12382 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12383#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12384 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12385
12386/**
12387 * Calls an AVX assembly implementation taking two visible arguments.
12388 *
12389 * There is one implicit zeroth argument, a pointer to the extended state.
12390 *
12391 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12392 * @param a1 The first extra argument.
12393 * @param a2 The second extra argument.
12394 */
12395#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12396 do { \
12397 IEM_MC_PREPARE_AVX_USAGE(); \
12398 a_pfnAImpl(pXState, (a1), (a2)); \
12399 } while (0)
12400
12401/**
12402 * Calls an AVX assembly implementation taking three visible arguments.
12403 *
12404 * There is one implicit zeroth argument, a pointer to the extended state.
12405 *
12406 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12407 * @param a1 The first extra argument.
12408 * @param a2 The second extra argument.
12409 * @param a3 The third extra argument.
12410 */
12411#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12412 do { \
12413 IEM_MC_PREPARE_AVX_USAGE(); \
12414 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12415 } while (0)
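
/* A brief sketch of how the implicit extended-state argument is wired up for
 * the AVX call macros above: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares the
 * pXState pointer that IEM_MC_CALL_AVX_AIMPL_2/3 pass as the zeroth
 * parameter. The worker name and the register index expressions are
 * illustrative assumptions only.
 *
 * @code
 *  IEM_MC_BEGIN(3, 0);
 *  IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *  IEM_MC_ARG(PRTUINT128U,  puDst, 1);
 *  IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
 *  IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *  IEM_MC_PREPARE_AVX_USAGE();
 *  IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *  IEM_MC_REF_XREG_U128_CONST(puSrc, pVCpu->iem.s.uVex3rdReg);
 *  IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker_u128, puDst, puSrc);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */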
12416
12417/** @note Not for IOPL or IF testing. */
12418#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12421/** @note Not for IOPL or IF testing. */
12422#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12427 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12428 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12429/** @note Not for IOPL or IF testing. */
12430#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12431 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12432 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12433/** @note Not for IOPL or IF testing. */
12434#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12435 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12436 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12437 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12438/** @note Not for IOPL or IF testing. */
12439#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12440 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12441 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12442 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12443#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12444#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12445#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12446/** @note Not for IOPL or IF testing. */
12447#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12448 if ( pVCpu->cpum.GstCtx.cx != 0 \
12449 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12450/** @note Not for IOPL or IF testing. */
12451#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12452 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12453 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12454/** @note Not for IOPL or IF testing. */
12455#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12456 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12457 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12458/** @note Not for IOPL or IF testing. */
12459#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12460 if ( pVCpu->cpum.GstCtx.cx != 0 \
12461 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12462/** @note Not for IOPL or IF testing. */
12463#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12464 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12465 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12466/** @note Not for IOPL or IF testing. */
12467#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12468 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12469 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12470#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12471#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12472
12473#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12474 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12475#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12476 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12477#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12478 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12479#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12480 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12481#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12482 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12483#define IEM_MC_IF_FCW_IM() \
12484 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12485
12486#define IEM_MC_ELSE() } else {
12487#define IEM_MC_ENDIF() } do {} while (0)
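
/* The IEM_MC_IF_* macros above open a brace which IEM_MC_ELSE()/IEM_MC_ENDIF()
 * pair up with, so conditional micro-blocks read like ordinary C. A minimal,
 * illustrative sketch (roughly the register form of CMOVZ Gv,Ev; the register
 * index expressions are simplified assumptions):
 *
 * @code
 *  IEM_MC_BEGIN(0, 1);
 *  IEM_MC_LOCAL(uint32_t, u32Tmp);
 *  IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *      IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Tmp);
 *  IEM_MC_ELSE()
 *      IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *  IEM_MC_ENDIF();
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */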
12488
12489/** @} */
12490
12491
12492/** @name Opcode Debug Helpers.
12493 * @{
12494 */
12495#ifdef VBOX_WITH_STATISTICS
12496# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12497#else
12498# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12499#endif
12500
12501#ifdef DEBUG
12502# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12503 do { \
12504 IEMOP_INC_STATS(a_Stats); \
12505 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12506 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12507 } while (0)
12508
12509# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12510 do { \
12511 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12512 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12513 (void)RT_CONCAT(OP_,a_Upper); \
12514 (void)(a_fDisHints); \
12515 (void)(a_fIemHints); \
12516 } while (0)
12517
12518# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12519 do { \
12520 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12521 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12522 (void)RT_CONCAT(OP_,a_Upper); \
12523 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12524 (void)(a_fDisHints); \
12525 (void)(a_fIemHints); \
12526 } while (0)
12527
12528# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12529 do { \
12530 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12531 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12532 (void)RT_CONCAT(OP_,a_Upper); \
12533 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12534 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12535 (void)(a_fDisHints); \
12536 (void)(a_fIemHints); \
12537 } while (0)
12538
12539# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12540 do { \
12541 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12542 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12543 (void)RT_CONCAT(OP_,a_Upper); \
12544 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12545 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12546 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12547 (void)(a_fDisHints); \
12548 (void)(a_fIemHints); \
12549 } while (0)
12550
12551# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12552 do { \
12553 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12554 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12555 (void)RT_CONCAT(OP_,a_Upper); \
12556 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12557 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12558 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12559 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12560 (void)(a_fDisHints); \
12561 (void)(a_fIemHints); \
12562 } while (0)
12563
12564#else
12565# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12566
12567# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12568 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12569# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12571# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12572 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12573# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12574 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12575# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12577
12578#endif
12579
12580#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12581 IEMOP_MNEMONIC0EX(a_Lower, \
12582 #a_Lower, \
12583 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12584#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12585 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12586 #a_Lower " " #a_Op1, \
12587 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12588#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12589 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12590 #a_Lower " " #a_Op1 "," #a_Op2, \
12591 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12592#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12593 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12594 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12595 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12596#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12597 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12598 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12599 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
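
/* A short, illustrative example of the mnemonic wrappers above at the top of
 * an opcode decoder function (the form/operand tokens and hint flags shown
 * are typical values, not taken from a specific decoder):
 *
 * @code
 *  IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 * @endcode
 *
 * This bumps the per-instruction statistics counter in statistics builds and
 * emits the Log4 decode line in debug builds; the RT_CONCAT references merely
 * verify at compile time that the IEMOPFORM_, OP_ and OP_PARM_ tokens exist.
 */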
12600
12601/** @} */
12602
12603
12604/** @name Opcode Helpers.
12605 * @{
12606 */
12607
12608#ifdef IN_RING3
12609# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12610 do { \
12611 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12612 else \
12613 { \
12614 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12615 return IEMOP_RAISE_INVALID_OPCODE(); \
12616 } \
12617 } while (0)
12618#else
12619# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12620 do { \
12621 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12622 else return IEMOP_RAISE_INVALID_OPCODE(); \
12623 } while (0)
12624#endif
12625
12626/** The instruction requires a 186 or later. */
12627#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12628# define IEMOP_HLP_MIN_186() do { } while (0)
12629#else
12630# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12631#endif
12632
12633/** The instruction requires a 286 or later. */
12634#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12635# define IEMOP_HLP_MIN_286() do { } while (0)
12636#else
12637# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12638#endif
12639
12640/** The instruction requires a 386 or later. */
12641#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12642# define IEMOP_HLP_MIN_386() do { } while (0)
12643#else
12644# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12645#endif
12646
12647/** The instruction requires a 386 or later if the given expression is true. */
12648#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12649# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12650#else
12651# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12652#endif
12653
12654/** The instruction requires a 486 or later. */
12655#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12656# define IEMOP_HLP_MIN_486() do { } while (0)
12657#else
12658# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12659#endif
12660
12661/** The instruction requires a Pentium (586) or later. */
12662#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12663# define IEMOP_HLP_MIN_586() do { } while (0)
12664#else
12665# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12666#endif
12667
12668/** The instruction requires a PentiumPro (686) or later. */
12669#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12670# define IEMOP_HLP_MIN_686() do { } while (0)
12671#else
12672# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12673#endif
12674
12675
12676/** The instruction raises an \#UD in real and V8086 mode. */
12677#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12678 do \
12679 { \
12680 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12681 else return IEMOP_RAISE_INVALID_OPCODE(); \
12682 } while (0)
12683
12684#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12685/** The instruction raises an \#UD in real and V8086 mode, or when in long mode
12686 * without using a 64-bit code segment (applicable to all VMX instructions
12687 * except VMCALL).
12688 */
12689#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12690 do \
12691 { \
12692 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12693 && ( !IEM_IS_LONG_MODE(pVCpu) \
12694 || IEM_IS_64BIT_CODE(pVCpu))) \
12695 { /* likely */ } \
12696 else \
12697 { \
12698 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12699 { \
12700 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12701 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12702 return IEMOP_RAISE_INVALID_OPCODE(); \
12703 } \
12704 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12705 { \
12706 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12707 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12708 return IEMOP_RAISE_INVALID_OPCODE(); \
12709 } \
12710 } \
12711 } while (0)
12712
12713/** The instruction can only be executed in VMX operation (VMX root mode and
12714 * non-root mode).
12715 *
12716 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12717 */
12718# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12719 do \
12720 { \
12721 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12722 else \
12723 { \
12724 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12725 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12726 return IEMOP_RAISE_INVALID_OPCODE(); \
12727 } \
12728 } while (0)
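
/* An illustrative sketch of how the two VMX decode helpers above are used at
 * the start of a VMX instruction decoder. The handler, CImpl and diagnostic
 * prefix names are placeholders patterned after the real ones; the real VMX
 * decoders elsewhere in IEM follow this general shape.
 *
 * @code
 *  FNIEMOP_DEF(iemOp_Grp7_SomeVmxInstr)   // placeholder name
 *  {
 *      IEMOP_HLP_VMX_INSTR("somevmxinstr", kVmxVDiag_SomeVmxInstr);
 *      IEMOP_HLP_IN_VMX_OPERATION("somevmxinstr", kVmxVDiag_SomeVmxInstr);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeVmxInstr);
 *  }
 * @endcode
 */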
12729#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12730
12731/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12732 * 64-bit mode. */
12733#define IEMOP_HLP_NO_64BIT() \
12734 do \
12735 { \
12736 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12737 return IEMOP_RAISE_INVALID_OPCODE(); \
12738 } while (0)
12739
12740/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12741 * 64-bit mode. */
12742#define IEMOP_HLP_ONLY_64BIT() \
12743 do \
12744 { \
12745 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12746 return IEMOP_RAISE_INVALID_OPCODE(); \
12747 } while (0)
12748
12749/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
12750#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12751 do \
12752 { \
12753 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12754 iemRecalEffOpSize64Default(pVCpu); \
12755 } while (0)
12756
12757/** The instruction has 64-bit operand size if in 64-bit mode. */
12758#define IEMOP_HLP_64BIT_OP_SIZE() \
12759 do \
12760 { \
12761 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12762 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12763 } while (0)
12764
12765/** Only a REX prefix immediately preceding the first opcode byte takes
12766 * effect. This macro helps ensure this as well as log bad guest code. */
12767#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12768 do \
12769 { \
12770 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12771 { \
12772 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12773 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12774 pVCpu->iem.s.uRexB = 0; \
12775 pVCpu->iem.s.uRexIndex = 0; \
12776 pVCpu->iem.s.uRexReg = 0; \
12777 iemRecalEffOpSize(pVCpu); \
12778 } \
12779 } while (0)
12780
12781/**
12782 * Done decoding.
12783 */
12784#define IEMOP_HLP_DONE_DECODING() \
12785 do \
12786 { \
12787 /*nothing for now, maybe later... */ \
12788 } while (0)
12789
12790/**
12791 * Done decoding, raise \#UD exception if lock prefix present.
12792 */
12793#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12794 do \
12795 { \
12796 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12797 { /* likely */ } \
12798 else \
12799 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12800 } while (0)
12801
12802
12803/**
12804 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12805 * repnz or size prefixes are present, or if in real or v8086 mode.
12806 */
12807#define IEMOP_HLP_DONE_VEX_DECODING() \
12808 do \
12809 { \
12810 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12811 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12812 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12813 { /* likely */ } \
12814 else \
12815 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12816 } while (0)
12817
12818/**
12819 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12820 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12821 */
12822#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12823 do \
12824 { \
12825 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12826 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12827 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12828 && pVCpu->iem.s.uVexLength == 0)) \
12829 { /* likely */ } \
12830 else \
12831 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12832 } while (0)
12833
12834
12835/**
12836 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12837 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12838 * register 0, or if in real or v8086 mode.
12839 */
12840#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12841 do \
12842 { \
12843 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12844 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12845 && !pVCpu->iem.s.uVex3rdReg \
12846 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12847 { /* likely */ } \
12848 else \
12849 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12850 } while (0)
12851
12852/**
12853 * Done decoding VEX, no V, L=0.
12854 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12855 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12856 */
12857#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12858 do \
12859 { \
12860 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12861 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12862 && pVCpu->iem.s.uVexLength == 0 \
12863 && pVCpu->iem.s.uVex3rdReg == 0 \
12864 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12865 { /* likely */ } \
12866 else \
12867 return IEMOP_RAISE_INVALID_OPCODE(); \
12868 } while (0)
12869
12870#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12871 do \
12872 { \
12873 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12874 { /* likely */ } \
12875 else \
12876 { \
12877 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12878 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12879 } \
12880 } while (0)
12881#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12882 do \
12883 { \
12884 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12885 { /* likely */ } \
12886 else \
12887 { \
12888 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12889 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12890 } \
12891 } while (0)
12892
12893/**
12894 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12895 * are present.
12896 */
12897#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12898 do \
12899 { \
12900 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12901 { /* likely */ } \
12902 else \
12903 return IEMOP_RAISE_INVALID_OPCODE(); \
12904 } while (0)
12905
12906/**
12907 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12908 * prefixes are present.
12909 */
12910#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12911 do \
12912 { \
12913 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12914 { /* likely */ } \
12915 else \
12916 return IEMOP_RAISE_INVALID_OPCODE(); \
12917 } while (0)
12918
12919
12920/**
12921 * Calculates the effective address of a ModR/M memory operand.
12922 *
12923 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12924 *
12925 * @return Strict VBox status code.
12926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12927 * @param bRm The ModRM byte.
12928 * @param cbImm The size of any immediate following the
12929 * effective address opcode bytes. Important for
12930 * RIP relative addressing.
12931 * @param pGCPtrEff Where to return the effective address.
12932 */
12933IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12934{
12935 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12936# define SET_SS_DEF() \
12937 do \
12938 { \
12939 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12940 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12941 } while (0)
12942
12943 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12944 {
12945/** @todo Check the effective address size crap! */
12946 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12947 {
12948 uint16_t u16EffAddr;
12949
12950 /* Handle the disp16 form with no registers first. */
12951 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12952 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12953 else
12954 {
12955                /* Get the displacement. */
12956 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12957 {
12958 case 0: u16EffAddr = 0; break;
12959 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12960 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12961 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12962 }
12963
12964 /* Add the base and index registers to the disp. */
12965 switch (bRm & X86_MODRM_RM_MASK)
12966 {
12967 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12968 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12969 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12970 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12971 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12972 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12973 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12974 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12975 }
12976 }
12977
12978 *pGCPtrEff = u16EffAddr;
12979 }
12980 else
12981 {
12982 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12983 uint32_t u32EffAddr;
12984
12985 /* Handle the disp32 form with no registers first. */
12986 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12987 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12988 else
12989 {
12990 /* Get the register (or SIB) value. */
12991 switch ((bRm & X86_MODRM_RM_MASK))
12992 {
12993 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12994 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12995 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12996 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12997 case 4: /* SIB */
12998 {
12999 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13000
13001 /* Get the index and scale it. */
13002 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13003 {
13004 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13005 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13006 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13007 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13008 case 4: u32EffAddr = 0; /*none */ break;
13009 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13010 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13011 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13012 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13013 }
13014 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13015
13016 /* add base */
13017 switch (bSib & X86_SIB_BASE_MASK)
13018 {
13019 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13020 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13021 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13022 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13023 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13024 case 5:
13025 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13026 {
13027 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13028 SET_SS_DEF();
13029 }
13030 else
13031 {
13032 uint32_t u32Disp;
13033 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13034 u32EffAddr += u32Disp;
13035 }
13036 break;
13037 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13038 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13039 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13040 }
13041 break;
13042 }
13043 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13044 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13045 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13046 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13047 }
13048
13049 /* Get and add the displacement. */
13050 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13051 {
13052 case 0:
13053 break;
13054 case 1:
13055 {
13056 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13057 u32EffAddr += i8Disp;
13058 break;
13059 }
13060 case 2:
13061 {
13062 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13063 u32EffAddr += u32Disp;
13064 break;
13065 }
13066 default:
13067 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13068 }
13069
13070 }
13071 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13072 *pGCPtrEff = u32EffAddr;
13073 else
13074 {
13075 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13076 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13077 }
13078 }
13079 }
13080 else
13081 {
13082 uint64_t u64EffAddr;
13083
13084 /* Handle the rip+disp32 form with no registers first. */
13085 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13086 {
13087 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13088 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13089 }
13090 else
13091 {
13092 /* Get the register (or SIB) value. */
13093 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13094 {
13095 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13096 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13097 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13098 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13099 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13100 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13101 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13102 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13103 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13104 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13105 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13106 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13107 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13108 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13109 /* SIB */
13110 case 4:
13111 case 12:
13112 {
13113 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13114
13115 /* Get the index and scale it. */
13116 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13117 {
13118 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13119 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13120 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13121 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13122 case 4: u64EffAddr = 0; /*none */ break;
13123 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13124 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13125 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13126 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13127 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13128 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13129 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13130 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13131 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13132 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13133 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13134 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13135 }
13136 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13137
13138 /* add base */
13139 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13140 {
13141 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13142 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13143 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13144 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13145 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13146 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13147 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13148 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13149 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13150 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13151 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13152 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13153 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13154 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13155 /* complicated encodings */
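                            /* Base register 5 or 13: with mod=0 there is no base register and a
                               disp32 follows instead; with mod!=0 the base is rBP (REX.B clear)
                               or r13 (REX.B set). */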
13156 case 5:
13157 case 13:
13158 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13159 {
13160 if (!pVCpu->iem.s.uRexB)
13161 {
13162 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13163 SET_SS_DEF();
13164 }
13165 else
13166 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13167 }
13168 else
13169 {
13170 uint32_t u32Disp;
13171 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13172 u64EffAddr += (int32_t)u32Disp;
13173 }
13174 break;
13175 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13176 }
13177 break;
13178 }
13179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13180 }
13181
13182 /* Get and add the displacement. */
13183 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13184 {
13185 case 0:
13186 break;
13187 case 1:
13188 {
13189 int8_t i8Disp;
13190 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13191 u64EffAddr += i8Disp;
13192 break;
13193 }
13194 case 2:
13195 {
13196 uint32_t u32Disp;
13197 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13198 u64EffAddr += (int32_t)u32Disp;
13199 break;
13200 }
13201 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13202 }
13203
13204 }
13205
13206 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13207 *pGCPtrEff = u64EffAddr;
13208 else
13209 {
13210 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13211 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13212 }
13213 }
13214
13215 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13216 return VINF_SUCCESS;
13217}
13218
13219
13220/**
13221 * Calculates the effective address of a ModR/M memory operand.
13222 *
13223 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13224 *
13225 * @return Strict VBox status code.
13226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13227 * @param bRm The ModRM byte.
13228 * @param cbImm The size of any immediate following the
13229 * effective address opcode bytes. Important for
13230 * RIP relative addressing.
13231 * @param pGCPtrEff Where to return the effective address.
13232 * @param offRsp RSP displacement.
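 *
 * @note    Unlike iemOpHlpCalcRmEffAddr, this variant adds @a offRsp whenever
 *          RSP/ESP is used as the SIB base register, presumably so callers can
 *          compensate for stack pointer adjustments made before the address
 *          calculation.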
13233 */
13234IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13235{
13236     Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13237# define SET_SS_DEF() \
13238 do \
13239 { \
13240 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13241 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13242 } while (0)
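    /* SET_SS_DEF switches the default segment from DS to SS for rBP/rSP based
       addressing, unless a segment override prefix is already in effect. */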
13243
13244 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13245 {
13246/** @todo Check the effective address size crap! */
13247 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13248 {
13249 uint16_t u16EffAddr;
13250
13251 /* Handle the disp16 form with no registers first. */
13252 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13253 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13254 else
13255 {
13256                 /* Get the displacement. */
13257 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13258 {
13259 case 0: u16EffAddr = 0; break;
13260 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13261 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13262 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13263 }
13264
13265 /* Add the base and index registers to the disp. */
13266 switch (bRm & X86_MODRM_RM_MASK)
13267 {
13268 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13269 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13270 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13271 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13272 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13273 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13274 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13275 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13276 }
13277 }
13278
13279 *pGCPtrEff = u16EffAddr;
13280 }
13281 else
13282 {
13283 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13284 uint32_t u32EffAddr;
13285
13286 /* Handle the disp32 form with no registers first. */
13287 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13288 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13289 else
13290 {
13291 /* Get the register (or SIB) value. */
13292 switch ((bRm & X86_MODRM_RM_MASK))
13293 {
13294 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13295 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13296 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13297 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13298 case 4: /* SIB */
13299 {
13300 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13301
13302 /* Get the index and scale it. */
13303 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13304 {
13305 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13306 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13307 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13308 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13309 case 4: u32EffAddr = 0; /*none */ break;
13310 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13311 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13312 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13313 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13314 }
13315 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13316
13317 /* add base */
13318 switch (bSib & X86_SIB_BASE_MASK)
13319 {
13320 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13321 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13322 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13323 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13324 case 4:
13325 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13326 SET_SS_DEF();
13327 break;
13328 case 5:
13329 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13330 {
13331 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13332 SET_SS_DEF();
13333 }
13334 else
13335 {
13336 uint32_t u32Disp;
13337 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13338 u32EffAddr += u32Disp;
13339 }
13340 break;
13341 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13342 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13344 }
13345 break;
13346 }
13347 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13348 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13349 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13350 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13351 }
13352
13353 /* Get and add the displacement. */
13354 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13355 {
13356 case 0:
13357 break;
13358 case 1:
13359 {
13360 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13361 u32EffAddr += i8Disp;
13362 break;
13363 }
13364 case 2:
13365 {
13366 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13367 u32EffAddr += u32Disp;
13368 break;
13369 }
13370 default:
13371 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13372 }
13373
13374 }
13375 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13376 *pGCPtrEff = u32EffAddr;
13377 else
13378 {
13379 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13380 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13381 }
13382 }
13383 }
13384 else
13385 {
13386 uint64_t u64EffAddr;
13387
13388 /* Handle the rip+disp32 form with no registers first. */
13389 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13390 {
13391 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13392 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13393 }
13394 else
13395 {
13396 /* Get the register (or SIB) value. */
13397 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13398 {
13399 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13400 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13401 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13402 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13403 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13404 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13405 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13406 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13407 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13408 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13409 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13410 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13411 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13412 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13413 /* SIB */
13414 case 4:
13415 case 12:
13416 {
13417 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13418
13419 /* Get the index and scale it. */
13420 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13421 {
13422 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13423 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13424 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13425 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13426 case 4: u64EffAddr = 0; /*none */ break;
13427 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13428 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13429 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13430 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13431 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13432 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13433 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13434 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13435 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13436 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13437 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13438 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13439 }
13440 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13441
13442 /* add base */
13443 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13444 {
13445 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13446 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13447 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13448 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13449 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13450 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13451 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13452 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13453 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13454 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13455 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13456 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13457 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13458 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13459 /* complicated encodings */
13460 case 5:
13461 case 13:
13462 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13463 {
13464 if (!pVCpu->iem.s.uRexB)
13465 {
13466 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13467 SET_SS_DEF();
13468 }
13469 else
13470 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13471 }
13472 else
13473 {
13474 uint32_t u32Disp;
13475 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13476 u64EffAddr += (int32_t)u32Disp;
13477 }
13478 break;
13479 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13480 }
13481 break;
13482 }
13483 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13484 }
13485
13486 /* Get and add the displacement. */
13487 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13488 {
13489 case 0:
13490 break;
13491 case 1:
13492 {
13493 int8_t i8Disp;
13494 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13495 u64EffAddr += i8Disp;
13496 break;
13497 }
13498 case 2:
13499 {
13500 uint32_t u32Disp;
13501 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13502 u64EffAddr += (int32_t)u32Disp;
13503 break;
13504 }
13505 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13506 }
13507
13508 }
13509
13510 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13511 *pGCPtrEff = u64EffAddr;
13512 else
13513 {
13514 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13515 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13516 }
13517 }
13518
13519     Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13520 return VINF_SUCCESS;
13521}
13522
13523
13524#ifdef IEM_WITH_SETJMP
13525/**
13526 * Calculates the effective address of a ModR/M memory operand.
13527 *
13528 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13529 *
13530 * May longjmp on internal error.
13531 *
13532 * @return The effective address.
13533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13534 * @param bRm The ModRM byte.
13535 * @param cbImm The size of any immediate following the
13536 * effective address opcode bytes. Important for
13537 * RIP relative addressing.
13538 */
13539IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13540{
13541 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13542# define SET_SS_DEF() \
13543 do \
13544 { \
13545 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13546 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13547 } while (0)
13548
13549 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13550 {
13551/** @todo Check the effective address size crap! */
13552 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13553 {
13554 uint16_t u16EffAddr;
13555
13556 /* Handle the disp16 form with no registers first. */
13557 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13558 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13559 else
13560 {
13561                 /* Get the displacement. */
13562 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13563 {
13564 case 0: u16EffAddr = 0; break;
13565 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13566 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13567 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13568 }
13569
13570 /* Add the base and index registers to the disp. */
13571 switch (bRm & X86_MODRM_RM_MASK)
13572 {
13573 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13574 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13575 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13576 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13577 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13578 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13579 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13580 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13581 }
13582 }
13583
13584 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13585 return u16EffAddr;
13586 }
13587
13588 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13589 uint32_t u32EffAddr;
13590
13591 /* Handle the disp32 form with no registers first. */
13592 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13593 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13594 else
13595 {
13596 /* Get the register (or SIB) value. */
13597 switch ((bRm & X86_MODRM_RM_MASK))
13598 {
13599 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13600 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13601 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13602 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13603 case 4: /* SIB */
13604 {
13605 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13606
13607 /* Get the index and scale it. */
13608 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13609 {
13610 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13611 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13612 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13613 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13614 case 4: u32EffAddr = 0; /*none */ break;
13615 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13616 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13617 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13618 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13619 }
13620 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13621
13622 /* add base */
13623 switch (bSib & X86_SIB_BASE_MASK)
13624 {
13625 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13626 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13627 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13628 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13629 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13630 case 5:
13631 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13632 {
13633 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13634 SET_SS_DEF();
13635 }
13636 else
13637 {
13638 uint32_t u32Disp;
13639 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13640 u32EffAddr += u32Disp;
13641 }
13642 break;
13643 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13644 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13646 }
13647 break;
13648 }
13649 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13650 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13651 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13652 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13653 }
13654
13655 /* Get and add the displacement. */
13656 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13657 {
13658 case 0:
13659 break;
13660 case 1:
13661 {
13662 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13663 u32EffAddr += i8Disp;
13664 break;
13665 }
13666 case 2:
13667 {
13668 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13669 u32EffAddr += u32Disp;
13670 break;
13671 }
13672 default:
13673 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13674 }
13675 }
13676
13677 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13678 {
13679 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13680 return u32EffAddr;
13681 }
13682 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13683 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13684 return u32EffAddr & UINT16_MAX;
13685 }
13686
13687 uint64_t u64EffAddr;
13688
13689 /* Handle the rip+disp32 form with no registers first. */
13690 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13691 {
13692 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13693 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13694 }
13695 else
13696 {
13697 /* Get the register (or SIB) value. */
13698 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13699 {
13700 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13701 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13702 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13703 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13704 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13705 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13706 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13707 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13708 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13709 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13710 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13711 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13712 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13713 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13714 /* SIB */
13715 case 4:
13716 case 12:
13717 {
13718 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13719
13720 /* Get the index and scale it. */
13721 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13722 {
13723 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13724 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13725 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13726 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13727 case 4: u64EffAddr = 0; /*none */ break;
13728 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13729 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13730 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13731 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13732 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13733 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13734 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13735 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13736 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13737 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13738 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13739 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13740 }
13741 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13742
13743 /* add base */
13744 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13745 {
13746 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13747 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13748 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13749 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13750 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13751 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13752 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13753 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13754 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13755 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13756 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13757 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13758 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13759 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13760 /* complicated encodings */
13761 case 5:
13762 case 13:
13763 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13764 {
13765 if (!pVCpu->iem.s.uRexB)
13766 {
13767 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13768 SET_SS_DEF();
13769 }
13770 else
13771 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13772 }
13773 else
13774 {
13775 uint32_t u32Disp;
13776 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13777 u64EffAddr += (int32_t)u32Disp;
13778 }
13779 break;
13780 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13781 }
13782 break;
13783 }
13784 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13785 }
13786
13787 /* Get and add the displacement. */
13788 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13789 {
13790 case 0:
13791 break;
13792 case 1:
13793 {
13794 int8_t i8Disp;
13795 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13796 u64EffAddr += i8Disp;
13797 break;
13798 }
13799 case 2:
13800 {
13801 uint32_t u32Disp;
13802 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13803 u64EffAddr += (int32_t)u32Disp;
13804 break;
13805 }
13806 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13807 }
13808
13809 }
13810
13811 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13812 {
13813 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13814 return u64EffAddr;
13815 }
13816 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13817 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13818 return u64EffAddr & UINT32_MAX;
13819}
13820#endif /* IEM_WITH_SETJMP */
13821
13822/** @} */
13823
13824
13825
13826/*
13827 * Include the instructions
13828 */
13829#include "IEMAllInstructions.cpp.h"
13830
13831
13832
13833#ifdef LOG_ENABLED
13834/**
13835 * Logs the current instruction.
13836 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13837 * @param fSameCtx Set if we have the same context information as the VMM,
13838 * clear if we may have already executed an instruction in
13839 * our debug context. When clear, we assume IEMCPU holds
13840 * valid CPU mode info.
13841 *
13842 * The @a fSameCtx parameter is now misleading and obsolete.
13843 * @param pszFunction The IEM function doing the execution.
13844 */
13845IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13846{
13847# ifdef IN_RING3
13848 if (LogIs2Enabled())
13849 {
13850 char szInstr[256];
13851 uint32_t cbInstr = 0;
13852 if (fSameCtx)
13853 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13854 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13855 szInstr, sizeof(szInstr), &cbInstr);
13856 else
13857 {
13858 uint32_t fFlags = 0;
13859 switch (pVCpu->iem.s.enmCpuMode)
13860 {
13861 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13862 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13863 case IEMMODE_16BIT:
13864 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13865 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13866 else
13867 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13868 break;
13869 }
13870 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13871 szInstr, sizeof(szInstr), &cbInstr);
13872 }
13873
13874 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13875 Log2(("**** %s\n"
13876 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13877 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13878 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13879 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13880 " %s\n"
13881 , pszFunction,
13882 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13883 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13884 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13885 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13886 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13887 szInstr));
13888
13889 if (LogIs3Enabled())
13890 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13891 }
13892 else
13893# endif
13894 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13895 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13896 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13897}
13898#endif /* LOG_ENABLED */
13899
13900
13901/**
13902 * Makes status code adjustments (pass up from I/O and access handlers)
13903 * and maintains statistics.
13904 *
13905 * @returns Strict VBox status code to pass up.
13906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13907 * @param rcStrict The status from executing an instruction.
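 *
 * @note    Informational statuses stashed in pVCpu->iem.s.rcPassUp by I/O and
 *          access handlers are merged with @a rcStrict here; broadly speaking,
 *          the pass-up status takes precedence when it has the lower (more
 *          urgent) value.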
13908 */
13909DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13910{
13911 if (rcStrict != VINF_SUCCESS)
13912 {
13913 if (RT_SUCCESS(rcStrict))
13914 {
13915 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13916 || rcStrict == VINF_IOM_R3_IOPORT_READ
13917 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13918 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13919 || rcStrict == VINF_IOM_R3_MMIO_READ
13920 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13921 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13922 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13923 || rcStrict == VINF_CPUM_R3_MSR_READ
13924 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13925 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13926 || rcStrict == VINF_EM_RAW_TO_R3
13927 || rcStrict == VINF_EM_TRIPLE_FAULT
13928 || rcStrict == VINF_GIM_R3_HYPERCALL
13929 /* raw-mode / virt handlers only: */
13930 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13931 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13932 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13933 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13934 || rcStrict == VINF_SELM_SYNC_GDT
13935 || rcStrict == VINF_CSAM_PENDING_ACTION
13936 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13937 /* nested hw.virt codes: */
13938 || rcStrict == VINF_VMX_VMEXIT
13939 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13940 || rcStrict == VINF_SVM_VMEXIT
13941 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13942/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13943 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13944#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13945 if ( rcStrict == VINF_VMX_VMEXIT
13946 && rcPassUp == VINF_SUCCESS)
13947 rcStrict = VINF_SUCCESS;
13948 else
13949#endif
13950#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13951 if ( rcStrict == VINF_SVM_VMEXIT
13952 && rcPassUp == VINF_SUCCESS)
13953 rcStrict = VINF_SUCCESS;
13954 else
13955#endif
13956 if (rcPassUp == VINF_SUCCESS)
13957 pVCpu->iem.s.cRetInfStatuses++;
13958 else if ( rcPassUp < VINF_EM_FIRST
13959 || rcPassUp > VINF_EM_LAST
13960 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13961 {
13962 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13963 pVCpu->iem.s.cRetPassUpStatus++;
13964 rcStrict = rcPassUp;
13965 }
13966 else
13967 {
13968 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13969 pVCpu->iem.s.cRetInfStatuses++;
13970 }
13971 }
13972 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13973 pVCpu->iem.s.cRetAspectNotImplemented++;
13974 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13975 pVCpu->iem.s.cRetInstrNotImplemented++;
13976 else
13977 pVCpu->iem.s.cRetErrStatuses++;
13978 }
13979 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13980 {
13981 pVCpu->iem.s.cRetPassUpStatus++;
13982 rcStrict = pVCpu->iem.s.rcPassUp;
13983 }
13984
13985 return rcStrict;
13986}
13987
13988
13989/**
13990 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13991 * IEMExecOneWithPrefetchedByPC.
13992 *
13993 * Similar code is found in IEMExecLots.
13994 *
13995 * @return Strict VBox status code.
13996 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13997 * @param fExecuteInhibit If set, execute the instruction following CLI,
13998 * POP SS and MOV SS,GR.
13999 * @param pszFunction The calling function name.
14000 */
14001DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14002{
14003 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14004 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14005 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14006 RT_NOREF_PV(pszFunction);
14007
14008#ifdef IEM_WITH_SETJMP
14009 VBOXSTRICTRC rcStrict;
14010 jmp_buf JmpBuf;
14011 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14012 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14013 if ((rcStrict = setjmp(JmpBuf)) == 0)
14014 {
14015 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14016 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14017 }
14018 else
14019 pVCpu->iem.s.cLongJumps++;
14020 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14021#else
14022 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14023 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14024#endif
14025 if (rcStrict == VINF_SUCCESS)
14026 pVCpu->iem.s.cInstructions++;
14027 if (pVCpu->iem.s.cActiveMappings > 0)
14028 {
14029 Assert(rcStrict != VINF_SUCCESS);
14030 iemMemRollback(pVCpu);
14031 }
14032 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14033 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14034 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14035
14036//#ifdef DEBUG
14037// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14038//#endif
14039
14040#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14041 /*
14042 * Perform any VMX nested-guest instruction boundary actions.
14043 *
14044 * If any of these causes a VM-exit, we must skip executing the next
14045 * instruction (would run into stale page tables). A VM-exit makes sure
14046      * there is no interrupt-inhibition, which should ensure we don't go on
14047      * to execute the next instruction.  Clearing fExecuteInhibit is
14048 * problematic because of the setjmp/longjmp clobbering above.
14049 */
14050 if ( rcStrict == VINF_SUCCESS
14051 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14052 {
14053 /* TPR-below threshold/APIC write has the highest priority. */
14054 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14055 {
14056 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14057 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14058 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14059 }
14060 /* MTF takes priority over VMX-preemption timer. */
14061 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14062 {
14063 rcStrict = iemVmxVmexitMtf(pVCpu);
14064 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14065 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14066 }
14067 /* VMX preemption timer takes priority over NMI-window exits. */
14068 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14069 {
14070 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14071 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14072 rcStrict = VINF_SUCCESS;
14073 else
14074 {
14075 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14076 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14077 }
14078 }
14079 /* NMI-window VM-exit. */
14080 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW))
14081 {
14082 rcStrict = iemVmxVmexitNmiWindow(pVCpu);
14083 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14084 }
14085 }
14086#endif
14087
14088 /* Execute the next instruction as well if a cli, pop ss or
14089 mov ss, Gr has just completed successfully. */
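    /* (These instructions create a one-instruction interrupt shadow, so the
       shadowed instruction is executed here as well rather than returning to
       the caller in the middle of the shadow.) */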
14090 if ( fExecuteInhibit
14091 && rcStrict == VINF_SUCCESS
14092 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14093 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14094 {
14095 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14096 if (rcStrict == VINF_SUCCESS)
14097 {
14098#ifdef LOG_ENABLED
14099 iemLogCurInstr(pVCpu, false, pszFunction);
14100#endif
14101#ifdef IEM_WITH_SETJMP
14102 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14103 if ((rcStrict = setjmp(JmpBuf)) == 0)
14104 {
14105 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14106 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14107 }
14108 else
14109 pVCpu->iem.s.cLongJumps++;
14110 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14111#else
14112 IEM_OPCODE_GET_NEXT_U8(&b);
14113 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14114#endif
14115 if (rcStrict == VINF_SUCCESS)
14116 pVCpu->iem.s.cInstructions++;
14117 if (pVCpu->iem.s.cActiveMappings > 0)
14118 {
14119 Assert(rcStrict != VINF_SUCCESS);
14120 iemMemRollback(pVCpu);
14121 }
14122 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14123 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14124 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14125 }
14126 else if (pVCpu->iem.s.cActiveMappings > 0)
14127 iemMemRollback(pVCpu);
14128 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14129 }
14130
14131 /*
14132 * Return value fiddling, statistics and sanity assertions.
14133 */
14134 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14135
14136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14138 return rcStrict;
14139}
14140
14141
14142#ifdef IN_RC
14143/**
14144 * Re-enters raw-mode or ensure we return to ring-3.
14145 *
14146 * @returns rcStrict, maybe modified.
14147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14148 * @param   rcStrict    The status code returned by the interpreter.
14149 */
14150DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14151{
14152 if ( !pVCpu->iem.s.fInPatchCode
14153 && ( rcStrict == VINF_SUCCESS
14154 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14155 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14156 {
14157 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14158 CPUMRawEnter(pVCpu);
14159 else
14160 {
14161 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14162 rcStrict = VINF_EM_RESCHEDULE;
14163 }
14164 }
14165 return rcStrict;
14166}
14167#endif
14168
14169
14170/**
14171 * Execute one instruction.
14172 *
14173 * @return Strict VBox status code.
14174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
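 *
 * Illustrative use (a minimal sketch, not lifted from any actual caller):
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict; // pass informational and error statuses up to the caller
 * @endcode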
14175 */
14176VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14177{
14178#ifdef LOG_ENABLED
14179 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14180#endif
14181
14182 /*
14183 * Do the decoding and emulation.
14184 */
14185 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14186 if (rcStrict == VINF_SUCCESS)
14187 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14188 else if (pVCpu->iem.s.cActiveMappings > 0)
14189 iemMemRollback(pVCpu);
14190
14191#ifdef IN_RC
14192 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14193#endif
14194 if (rcStrict != VINF_SUCCESS)
14195 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14196 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14197 return rcStrict;
14198}
14199
14200
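/**
 * Executes one instruction, optionally returning the number of guest memory
 * bytes written by it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core; must match the VCpu's own context core.
 * @param   pcbWritten  Where to return the number of guest memory bytes written
 *                      by the instruction. Optional.
 */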
14201VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14202{
14203 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14204
14205 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14206 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14207 if (rcStrict == VINF_SUCCESS)
14208 {
14209 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14210 if (pcbWritten)
14211 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14212 }
14213 else if (pVCpu->iem.s.cActiveMappings > 0)
14214 iemMemRollback(pVCpu);
14215
14216#ifdef IN_RC
14217 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14218#endif
14219 return rcStrict;
14220}
14221
14222
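/**
 * Executes one instruction, using the supplied opcode bytes when they were
 * fetched at the current RIP and falling back to a normal prefetch otherwise.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu          The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore       The context core; must match the VCpu's own context core.
 * @param   OpcodeBytesPC  The guest RIP at which the opcode bytes were fetched.
 * @param   pvOpcodeBytes  The prefetched opcode bytes.
 * @param   cbOpcodeBytes  Number of prefetched bytes.
 */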
14223VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14224 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14225{
14226 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14227
14228 VBOXSTRICTRC rcStrict;
14229 if ( cbOpcodeBytes
14230 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14231 {
14232 iemInitDecoder(pVCpu, false);
14233#ifdef IEM_WITH_CODE_TLB
14234 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14235 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14236 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14237 pVCpu->iem.s.offCurInstrStart = 0;
14238 pVCpu->iem.s.offInstrNextByte = 0;
14239#else
14240 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14241 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14242#endif
14243 rcStrict = VINF_SUCCESS;
14244 }
14245 else
14246 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14247 if (rcStrict == VINF_SUCCESS)
14248 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14249 else if (pVCpu->iem.s.cActiveMappings > 0)
14250 iemMemRollback(pVCpu);
14251
14252#ifdef IN_RC
14253 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14254#endif
14255 return rcStrict;
14256}
14257
14258
14259VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14260{
14261 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14262
14263 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14264 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14265 if (rcStrict == VINF_SUCCESS)
14266 {
14267 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14268 if (pcbWritten)
14269 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14270 }
14271 else if (pVCpu->iem.s.cActiveMappings > 0)
14272 iemMemRollback(pVCpu);
14273
14274#ifdef IN_RC
14275 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14276#endif
14277 return rcStrict;
14278}
14279
14280
14281VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14282 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14283{
14284 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14285
14286 VBOXSTRICTRC rcStrict;
14287 if ( cbOpcodeBytes
14288 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14289 {
14290 iemInitDecoder(pVCpu, true);
14291#ifdef IEM_WITH_CODE_TLB
14292 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14293 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14294 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14295 pVCpu->iem.s.offCurInstrStart = 0;
14296 pVCpu->iem.s.offInstrNextByte = 0;
14297#else
14298 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14299 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14300#endif
14301 rcStrict = VINF_SUCCESS;
14302 }
14303 else
14304 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14305 if (rcStrict == VINF_SUCCESS)
14306 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14307 else if (pVCpu->iem.s.cActiveMappings > 0)
14308 iemMemRollback(pVCpu);
14309
14310#ifdef IN_RC
14311 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14312#endif
14313 return rcStrict;
14314}
14315
14316
14317/**
14318 * For debugging DISGetParamSize; may come in handy.
14319 *
14320 * @returns Strict VBox status code.
14321 * @param pVCpu The cross context virtual CPU structure of the
14322 * calling EMT.
14323 * @param pCtxCore The context core structure.
14324 * @param OpcodeBytesPC The PC of the opcode bytes.
14325 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14326 * @param cbOpcodeBytes Number of prefetched bytes.
14327 * @param pcbWritten Where to return the number of bytes written.
14328 * Optional.
14329 */
14330VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14331 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14332 uint32_t *pcbWritten)
14333{
14334 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14335
14336 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14337 VBOXSTRICTRC rcStrict;
14338 if ( cbOpcodeBytes
14339 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14340 {
14341 iemInitDecoder(pVCpu, true);
14342#ifdef IEM_WITH_CODE_TLB
14343 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14344 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14345 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14346 pVCpu->iem.s.offCurInstrStart = 0;
14347 pVCpu->iem.s.offInstrNextByte = 0;
14348#else
14349 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14350 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14351#endif
14352 rcStrict = VINF_SUCCESS;
14353 }
14354 else
14355 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14356 if (rcStrict == VINF_SUCCESS)
14357 {
14358 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14359 if (pcbWritten)
14360 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14361 }
14362 else if (pVCpu->iem.s.cActiveMappings > 0)
14363 iemMemRollback(pVCpu);
14364
14365#ifdef IN_RC
14366 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14367#endif
14368 return rcStrict;
14369}
14370
14371
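/**
 * Executes instructions in a loop until a limit is hit, a force-flag needs
 * servicing, or a status code forces a return.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu             The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions  The maximum number of instructions to execute.
 * @param   cPollRate         Poll-rate mask; @a cPollRate + 1 must be a power of
 *                            two, and timers are polled roughly every
 *                            cPollRate + 1 instructions.
 * @param   pcInstructions    Where to return the number of instructions executed.
 *                            Optional.
 *
 * Illustrative call (the limit and poll-rate values below are only examples):
 * @code
 *      uint32_t cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 511, &cInstructions);
 * @endcode
 */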
14372VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14373{
14374 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14375 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14376
14377 /*
14378 * See if there is an interrupt pending in TRPM, inject it if we can.
14379 */
14380 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14381#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14382 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14383 if (fIntrEnabled)
14384 {
14385 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14386 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14387 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14388 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14389 else
14390 {
14391 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14392 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14393 }
14394 }
14395#else
14396 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14397#endif
14398 if ( fIntrEnabled
14399 && TRPMHasTrap(pVCpu)
14400 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14401 {
14402 uint8_t u8TrapNo;
14403 TRPMEVENT enmType;
14404 RTGCUINT uErrCode;
14405 RTGCPTR uCr2;
14406 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14407 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14408 TRPMResetTrap(pVCpu);
14409#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14410 /* Injecting an event may cause a VM-exit. */
14411 if ( rcStrict != VINF_SUCCESS
14412 && rcStrict != VINF_IEM_RAISED_XCPT)
14413 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14414#else
14415 NOREF(rcStrict);
14416#endif
14417 }
14418
14419 /*
14420 * Initial decoder init w/ prefetch, then setup setjmp.
14421 */
14422 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14423 if (rcStrict == VINF_SUCCESS)
14424 {
14425#ifdef IEM_WITH_SETJMP
14426 jmp_buf JmpBuf;
14427 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14428 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14429 pVCpu->iem.s.cActiveMappings = 0;
14430 if ((rcStrict = setjmp(JmpBuf)) == 0)
14431#endif
14432 {
14433 /*
14434              * The run loop.  The caller limits the number of instructions via cMaxInstructions.
14435 */
14436 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14437 PVM pVM = pVCpu->CTX_SUFF(pVM);
14438 for (;;)
14439 {
14440 /*
14441 * Log the state.
14442 */
14443#ifdef LOG_ENABLED
14444 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14445#endif
14446
14447 /*
14448 * Do the decoding and emulation.
14449 */
14450 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14451 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14452 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14453 {
14454 Assert(pVCpu->iem.s.cActiveMappings == 0);
14455 pVCpu->iem.s.cInstructions++;
14456 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14457 {
14458 uint64_t fCpu = pVCpu->fLocalForcedActions
14459 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14460 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14461 | VMCPU_FF_TLB_FLUSH
14462#ifdef VBOX_WITH_RAW_MODE
14463 | VMCPU_FF_TRPM_SYNC_IDT
14464 | VMCPU_FF_SELM_SYNC_TSS
14465 | VMCPU_FF_SELM_SYNC_GDT
14466 | VMCPU_FF_SELM_SYNC_LDT
14467#endif
14468 | VMCPU_FF_INHIBIT_INTERRUPTS
14469 | VMCPU_FF_BLOCK_NMIS
14470 | VMCPU_FF_UNHALT ));
14471
14472 if (RT_LIKELY( ( !fCpu
14473 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14474 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14475 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14476 {
14477 if (cMaxInstructionsGccStupidity-- > 0)
14478 {
14479                             /* Poll timers every now and then according to the caller's specs. */
14480 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14481 || !TMTimerPollBool(pVM, pVCpu))
14482 {
14483 Assert(pVCpu->iem.s.cActiveMappings == 0);
14484 iemReInitDecoder(pVCpu);
14485 continue;
14486 }
14487 }
14488 }
14489 }
14490 Assert(pVCpu->iem.s.cActiveMappings == 0);
14491 }
14492 else if (pVCpu->iem.s.cActiveMappings > 0)
14493 iemMemRollback(pVCpu);
14494 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14495 break;
14496 }
14497 }
14498#ifdef IEM_WITH_SETJMP
14499 else
14500 {
14501 if (pVCpu->iem.s.cActiveMappings > 0)
14502 iemMemRollback(pVCpu);
14503# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14504 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14505# endif
14506 pVCpu->iem.s.cLongJumps++;
14507 }
14508 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14509#endif
14510
14511 /*
14512 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14513 */
14514 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14515 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14516 }
14517 else
14518 {
14519 if (pVCpu->iem.s.cActiveMappings > 0)
14520 iemMemRollback(pVCpu);
14521
14522#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14523 /*
14524 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14525 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14526 */
14527 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14528#endif
14529 }
14530
14531 /*
14532 * Maybe re-enter raw-mode and log.
14533 */
14534#ifdef IN_RC
14535 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14536#endif
14537 if (rcStrict != VINF_SUCCESS)
14538 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14539 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14540 if (pcInstructions)
14541 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14542 return rcStrict;
14543}
14544
14545
14546/**
14547 * Interface used by EMExecuteExec, does exit statistics and limits.
14548 *
14549 * @returns Strict VBox status code.
14550 * @param pVCpu The cross context virtual CPU structure.
14551 * @param fWillExit To be defined.
14552 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14553 * @param cMaxInstructions Maximum number of instructions to execute.
14554 * @param cMaxInstructionsWithoutExits
14555 * The max number of instructions without exits.
14556 * @param pStats Where to return statistics.
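 *
 * @note    An exit is counted whenever pVCpu->iem.s.cPotentialExits changes
 *          across an executed instruction, except when no instruction has
 *          completed since the previously counted exit.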
14557 */
14558VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14559 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14560{
14561 NOREF(fWillExit); /** @todo define flexible exit crits */
14562
14563 /*
14564 * Initialize return stats.
14565 */
14566 pStats->cInstructions = 0;
14567 pStats->cExits = 0;
14568 pStats->cMaxExitDistance = 0;
14569 pStats->cReserved = 0;
14570
14571 /*
14572 * Initial decoder init w/ prefetch, then setup setjmp.
14573 */
14574 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14575 if (rcStrict == VINF_SUCCESS)
14576 {
14577#ifdef IEM_WITH_SETJMP
14578 jmp_buf JmpBuf;
14579 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14580 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14581 pVCpu->iem.s.cActiveMappings = 0;
14582 if ((rcStrict = setjmp(JmpBuf)) == 0)
14583#endif
14584 {
14585#ifdef IN_RING0
14586 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14587#endif
14588 uint32_t cInstructionSinceLastExit = 0;
14589
14590 /*
14591             * The run loop.  The caller limits the number of instructions via cMaxInstructions.
14592 */
14593 PVM pVM = pVCpu->CTX_SUFF(pVM);
14594 for (;;)
14595 {
14596 /*
14597 * Log the state.
14598 */
14599#ifdef LOG_ENABLED
14600 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14601#endif
14602
14603 /*
14604 * Do the decoding and emulation.
14605 */
14606 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14607
14608 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14609 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14610
14611 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14612 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14613 {
14614 pStats->cExits += 1;
14615 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14616 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14617 cInstructionSinceLastExit = 0;
14618 }
14619
14620 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14621 {
14622 Assert(pVCpu->iem.s.cActiveMappings == 0);
14623 pVCpu->iem.s.cInstructions++;
14624 pStats->cInstructions++;
14625 cInstructionSinceLastExit++;
14626 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14627 {
14628 uint64_t fCpu = pVCpu->fLocalForcedActions
14629 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14630 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14631 | VMCPU_FF_TLB_FLUSH
14632#ifdef VBOX_WITH_RAW_MODE
14633 | VMCPU_FF_TRPM_SYNC_IDT
14634 | VMCPU_FF_SELM_SYNC_TSS
14635 | VMCPU_FF_SELM_SYNC_GDT
14636 | VMCPU_FF_SELM_SYNC_LDT
14637#endif
14638 | VMCPU_FF_INHIBIT_INTERRUPTS
14639 | VMCPU_FF_BLOCK_NMIS
14640 | VMCPU_FF_UNHALT ));
14641
14642 if (RT_LIKELY( ( ( !fCpu
14643 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14644 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14645 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14646 || pStats->cInstructions < cMinInstructions))
14647 {
14648 if (pStats->cInstructions < cMaxInstructions)
14649 {
14650 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14651 {
14652#ifdef IN_RING0
14653 if ( !fCheckPreemptionPending
14654 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14655#endif
14656 {
14657 Assert(pVCpu->iem.s.cActiveMappings == 0);
14658 iemReInitDecoder(pVCpu);
14659 continue;
14660 }
14661#ifdef IN_RING0
14662 rcStrict = VINF_EM_RAW_INTERRUPT;
14663 break;
14664#endif
14665 }
14666 }
14667 }
14668 Assert(!(fCpu & VMCPU_FF_IEM));
14669 }
14670 Assert(pVCpu->iem.s.cActiveMappings == 0);
14671 }
14672 else if (pVCpu->iem.s.cActiveMappings > 0)
14673 iemMemRollback(pVCpu);
14674 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14675 break;
14676 }
14677 }
14678#ifdef IEM_WITH_SETJMP
14679 else
14680 {
14681 if (pVCpu->iem.s.cActiveMappings > 0)
14682 iemMemRollback(pVCpu);
14683 pVCpu->iem.s.cLongJumps++;
14684 }
14685 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14686#endif
14687
14688 /*
14689 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14690 */
14691 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14693 }
14694 else
14695 {
14696 if (pVCpu->iem.s.cActiveMappings > 0)
14697 iemMemRollback(pVCpu);
14698
14699#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14700 /*
14701 * When a nested-guest triggers an exception intercept (e.g. #PF) while fetching
14702 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14703 */
14704 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14705#endif
14706 }
14707
14708 /*
14709 * Maybe re-enter raw-mode and log.
14710 */
14711#ifdef IN_RC
14712 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14713#endif
14714 if (rcStrict != VINF_SUCCESS)
14715 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14716 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14717 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14718 return rcStrict;
14719}
14720
14721
14722/**
14723 * Injects a trap, fault, abort, software interrupt or external interrupt.
14724 *
14725 * The parameter list matches TRPMQueryTrapAll pretty closely.
14726 *
14727 * @returns Strict VBox status code.
14728 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14729 * @param u8TrapNo The trap number.
14730 * @param enmType What type is it (trap/fault/abort), software
14731 * interrupt or hardware interrupt.
14732 * @param uErrCode The error code if applicable.
14733 * @param uCr2 The CR2 value if applicable.
14734 * @param cbInstr The instruction length (only relevant for
14735 * software interrupts).
14736 */
14737VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14738 uint8_t cbInstr)
14739{
14740 iemInitDecoder(pVCpu, false);
14741#ifdef DBGFTRACE_ENABLED
14742 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14743 u8TrapNo, enmType, uErrCode, uCr2);
14744#endif
14745
14746 uint32_t fFlags;
14747 switch (enmType)
14748 {
14749 case TRPM_HARDWARE_INT:
14750 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14751 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14752 uErrCode = uCr2 = 0;
14753 break;
14754
14755 case TRPM_SOFTWARE_INT:
14756 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14757 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14758 uErrCode = uCr2 = 0;
14759 break;
14760
14761 case TRPM_TRAP:
14762 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14763 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14764 if (u8TrapNo == X86_XCPT_PF)
14765 fFlags |= IEM_XCPT_FLAGS_CR2;
14766 switch (u8TrapNo)
14767 {
14768 case X86_XCPT_DF:
14769 case X86_XCPT_TS:
14770 case X86_XCPT_NP:
14771 case X86_XCPT_SS:
14772 case X86_XCPT_PF:
14773 case X86_XCPT_AC:
14774 fFlags |= IEM_XCPT_FLAGS_ERR;
14775 break;
14776 }
14777 break;
14778
14779 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14780 }
14781
14782 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14783
14784 if (pVCpu->iem.s.cActiveMappings > 0)
14785 iemMemRollback(pVCpu);
14786
14787 return rcStrict;
14788}
14789
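/* Illustrative sketch (not part of the original sources): reflecting a pending
 * page fault into the guest via IEMInjectTrap.  The uErrCode and uCr2 values
 * are assumed to come from whatever produced the fault; cbInstr only matters
 * for software interrupts and can be passed as 0 here.
 *
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
 *                                            uErrCode, uCr2, 0);
 *      if (   rcStrict == VINF_SUCCESS
 *          || rcStrict == VINF_IEM_RAISED_XCPT)
 *      {
 *          // The exception is now pending delivery in the guest context.
 *      }
 */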
14790
14791/**
14792 * Injects the active TRPM event.
14793 *
14794 * @returns Strict VBox status code.
14795 * @param pVCpu The cross context virtual CPU structure.
14796 */
14797VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14798{
14799#ifndef IEM_IMPLEMENTS_TASKSWITCH
14800 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14801#else
14802 uint8_t u8TrapNo;
14803 TRPMEVENT enmType;
14804 RTGCUINT uErrCode;
14805 RTGCUINTPTR uCr2;
14806 uint8_t cbInstr;
14807 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14808 if (RT_FAILURE(rc))
14809 return rc;
14810
14811 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14812#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14813 if (rcStrict == VINF_SVM_VMEXIT)
14814 rcStrict = VINF_SUCCESS;
14815#endif
14816#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14817 if (rcStrict == VINF_VMX_VMEXIT)
14818 rcStrict = VINF_SUCCESS;
14819#endif
14820 /** @todo Are there any other codes that imply the event was successfully
14821 * delivered to the guest? See @bugref{6607}. */
14822 if ( rcStrict == VINF_SUCCESS
14823 || rcStrict == VINF_IEM_RAISED_XCPT)
14824 TRPMResetTrap(pVCpu);
14825
14826 return rcStrict;
14827#endif
14828}
14829
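/* Illustrative sketch (not part of the original sources): the typical caller
 * pattern around IEMInjectTrpmEvent.  TRPMHasTrap is assumed to be the usual
 * TRPM query for a pending event on the calling EMT.
 *
 *      if (TRPMHasTrap(pVCpu))
 *      {
 *          VBOXSTRICTRC rcStrict = IEMInjectTrpmEvent(pVCpu);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              // Event delivered (or folded into a nested VM-exit); TRPM has been reset.
 *          }
 *      }
 */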
14830
14831VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14832{
14833 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14834 return VERR_NOT_IMPLEMENTED;
14835}
14836
14837
14838VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14839{
14840 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14841 return VERR_NOT_IMPLEMENTED;
14842}
14843
14844
14845#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14846/**
14847 * Executes an IRET instruction with default operand size.
14848 *
14849 * This is for PATM.
14850 *
14851 * @returns VBox status code.
14852 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14853 * @param pCtxCore The register frame.
14854 */
14855VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14856{
14857 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14858
14859 iemCtxCoreToCtx(pCtx, pCtxCore);
14860 iemInitDecoder(pVCpu);
14861 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14862 if (rcStrict == VINF_SUCCESS)
14863 iemCtxToCtxCore(pCtxCore, pCtx);
14864 else
14865 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14866 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14867 return rcStrict;
14868}
14869#endif
14870
14871
14872/**
14873 * Macro used by the IEMExec* methods to check the given instruction length.
14874 *
14875 * Will return on failure!
14876 *
14877 * @param a_cbInstr The given instruction length.
14878 * @param a_cbMin The minimum length.
14879 */
14880#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14881 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14882 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14883
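/* Added commentary on the range check above: by doing the comparison in
 * unsigned arithmetic, a single compare covers both bounds.  If a_cbInstr is
 * smaller than a_cbMin the subtraction wraps around to a huge value and the
 * check fails; if it is larger than 15 (the architectural maximum instruction
 * length) the difference exceeds 15 - a_cbMin and the check also fails.
 * For example, with a_cbMin = 3 the accepted instruction lengths are 3..15:
 *
 *      // spelled-out form of the macro's condition for a_cbMin = 3:
 *      //     (unsigned)(cbInstr - 3) <= (unsigned)(15 - 3)
 *      // cbInstr = 2  -> 0xffffffff >  12 -> rejected
 *      // cbInstr = 3  -> 0          <= 12 -> accepted
 *      // cbInstr = 15 -> 12         <= 12 -> accepted
 *      // cbInstr = 16 -> 13         >  12 -> rejected
 */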
14884
14885/**
14886 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14887 *
14888 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14889 *
14890 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14892 * @param rcStrict The status code to fiddle.
14893 */
14894DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14895{
14896 iemUninitExec(pVCpu);
14897#ifdef IN_RC
14898 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14899#else
14900 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14901#endif
14902}
14903
14904
14905/**
14906 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14907 *
14908 * This API ASSUMES that the caller has already verified that the guest code is
14909 * allowed to access the I/O port. (The I/O port is in the DX register in the
14910 * guest state.)
14911 *
14912 * @returns Strict VBox status code.
14913 * @param pVCpu The cross context virtual CPU structure.
14914 * @param cbValue The size of the I/O port access (1, 2, or 4).
14915 * @param enmAddrMode The addressing mode.
14916 * @param fRepPrefix Indicates whether a repeat prefix is used
14917 * (doesn't matter which for this instruction).
14918 * @param cbInstr The instruction length in bytes.
14919 * @param iEffSeg The effective segment register number (X86_SREG_XXX).
14920 * @param fIoChecked Whether the access to the I/O port has been
14921 * checked or not. It's typically checked in the
14922 * HM scenario.
14923 */
14924VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14925 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14926{
14927 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14928 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14929
14930 /*
14931 * State init.
14932 */
14933 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14934
14935 /*
14936 * Switch orgy for getting to the right handler.
14937 */
14938 VBOXSTRICTRC rcStrict;
14939 if (fRepPrefix)
14940 {
14941 switch (enmAddrMode)
14942 {
14943 case IEMMODE_16BIT:
14944 switch (cbValue)
14945 {
14946 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14947 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14948 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14949 default:
14950 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14951 }
14952 break;
14953
14954 case IEMMODE_32BIT:
14955 switch (cbValue)
14956 {
14957 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14958 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14959 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14960 default:
14961 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14962 }
14963 break;
14964
14965 case IEMMODE_64BIT:
14966 switch (cbValue)
14967 {
14968 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14969 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14970 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14971 default:
14972 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14973 }
14974 break;
14975
14976 default:
14977 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14978 }
14979 }
14980 else
14981 {
14982 switch (enmAddrMode)
14983 {
14984 case IEMMODE_16BIT:
14985 switch (cbValue)
14986 {
14987 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14988 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14989 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14990 default:
14991 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14992 }
14993 break;
14994
14995 case IEMMODE_32BIT:
14996 switch (cbValue)
14997 {
14998 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14999 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15000 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15001 default:
15002 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15003 }
15004 break;
15005
15006 case IEMMODE_64BIT:
15007 switch (cbValue)
15008 {
15009 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15010 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15011 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15012 default:
15013 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15014 }
15015 break;
15016
15017 default:
15018 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15019 }
15020 }
15021
15022 if (pVCpu->iem.s.cActiveMappings)
15023 iemMemRollback(pVCpu);
15024
15025 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15026}
15027
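/* Illustrative sketch (not part of the original sources): a VM-exit handler
 * dealing with a "REP OUTSB" exit might forward it like this.  cbInstr is
 * assumed to come from the exit information, DS is the default segment when
 * no override prefix was decoded, and the port was already permission checked
 * by the caller (hence fIoChecked = true).
 *
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu,
 *                                                   1,              // cbValue: byte-sized OUTSB
 *                                                   IEMMODE_32BIT,  // guest address-size mode
 *                                                   true,           // fRepPrefix
 *                                                   cbInstr,
 *                                                   X86_SREG_DS,    // iEffSeg
 *                                                   true);          // fIoChecked
 */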
15028
15029/**
15030 * Interface for HM and EM for executing string I/O IN (read) instructions.
15031 *
15032 * This API ASSUMES that the caller has already verified that the guest code is
15033 * allowed to access the I/O port. (The I/O port is in the DX register in the
15034 * guest state.)
15035 *
15036 * @returns Strict VBox status code.
15037 * @param pVCpu The cross context virtual CPU structure.
15038 * @param cbValue The size of the I/O port access (1, 2, or 4).
15039 * @param enmAddrMode The addressing mode.
15040 * @param fRepPrefix Indicates whether a repeat prefix is used
15041 * (doesn't matter which for this instruction).
15042 * @param cbInstr The instruction length in bytes.
15043 * @param fIoChecked Whether the access to the I/O port has been
15044 * checked or not. It's typically checked in the
15045 * HM scenario.
15046 */
15047VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15048 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15049{
15050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15051
15052 /*
15053 * State init.
15054 */
15055 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15056
15057 /*
15058 * Switch orgy for getting to the right handler.
15059 */
15060 VBOXSTRICTRC rcStrict;
15061 if (fRepPrefix)
15062 {
15063 switch (enmAddrMode)
15064 {
15065 case IEMMODE_16BIT:
15066 switch (cbValue)
15067 {
15068 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15069 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15070 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15071 default:
15072 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15073 }
15074 break;
15075
15076 case IEMMODE_32BIT:
15077 switch (cbValue)
15078 {
15079 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15080 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15081 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15082 default:
15083 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15084 }
15085 break;
15086
15087 case IEMMODE_64BIT:
15088 switch (cbValue)
15089 {
15090 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15091 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15092 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15093 default:
15094 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15095 }
15096 break;
15097
15098 default:
15099 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15100 }
15101 }
15102 else
15103 {
15104 switch (enmAddrMode)
15105 {
15106 case IEMMODE_16BIT:
15107 switch (cbValue)
15108 {
15109 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15110 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15111 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15112 default:
15113 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15114 }
15115 break;
15116
15117 case IEMMODE_32BIT:
15118 switch (cbValue)
15119 {
15120 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15121 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15122 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15123 default:
15124 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15125 }
15126 break;
15127
15128 case IEMMODE_64BIT:
15129 switch (cbValue)
15130 {
15131 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15132 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15133 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15134 default:
15135 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15136 }
15137 break;
15138
15139 default:
15140 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15141 }
15142 }
15143
15144 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15145 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15146}
15147
15148
15149/**
15150 * Interface for rawmode to execute an OUT instruction.
15151 *
15152 * @returns Strict VBox status code.
15153 * @param pVCpu The cross context virtual CPU structure.
15154 * @param cbInstr The instruction length in bytes.
15155 * @param u16Port The port to write to.
15156 * @param fImm Whether the port is specified using an immediate operand or
15157 * using the implicit DX register.
15158 * @param cbReg The register size.
15159 *
15160 * @remarks In ring-0 not all of the state needs to be synced in.
15161 */
15162VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15163{
15164 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15165 Assert(cbReg <= 4 && cbReg != 3);
15166
15167 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15169 Assert(!pVCpu->iem.s.cActiveMappings);
15170 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15171}
15172
15173
15174/**
15175 * Interface for rawmode to execute an IN instruction.
15176 *
15177 * @returns Strict VBox status code.
15178 * @param pVCpu The cross context virtual CPU structure.
15179 * @param cbInstr The instruction length in bytes.
15180 * @param u16Port The port to read.
15181 * @param fImm Whether the port is specified using an immediate operand or
15182 * using the implicit DX.
15183 * @param cbReg The register size.
15184 */
15185VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15186{
15187 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15188 Assert(cbReg <= 4 && cbReg != 3);
15189
15190 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15191 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15192 Assert(!pVCpu->iem.s.cActiveMappings);
15193 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15194}
15195
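/* Illustrative sketch (not part of the original sources): handling a decoded
 * port I/O exit with the two interfaces above.  The port number, direction
 * and access size are assumed to have been extracted from the exit
 * information by the caller.
 *
 *      bool const    fImm     = false;   // port taken from DX, not an immediate operand
 *      uint8_t const cbReg    = 1;       // byte-sized access (AL)
 *      VBOXSTRICTRC  rcStrict = fIsWrite
 *                             ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg)
 *                             : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, fImm, cbReg);
 */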
15196
15197/**
15198 * Interface for HM and EM to write to a CRx register.
15199 *
15200 * @returns Strict VBox status code.
15201 * @param pVCpu The cross context virtual CPU structure.
15202 * @param cbInstr The instruction length in bytes.
15203 * @param iCrReg The control register number (destination).
15204 * @param iGReg The general purpose register number (source).
15205 *
15206 * @remarks In ring-0 not all of the state needs to be synced in.
15207 */
15208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15209{
15210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15211 Assert(iCrReg < 16);
15212 Assert(iGReg < 16);
15213
15214 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15215 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15216 Assert(!pVCpu->iem.s.cActiveMappings);
15217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15218}
15219
15220
15221/**
15222 * Interface for HM and EM to read from a CRx register.
15223 *
15224 * @returns Strict VBox status code.
15225 * @param pVCpu The cross context virtual CPU structure.
15226 * @param cbInstr The instruction length in bytes.
15227 * @param iGReg The general purpose register number (destination).
15228 * @param iCrReg The control register number (source).
15229 *
15230 * @remarks In ring-0 not all of the state needs to be synced in.
15231 */
15232VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15233{
15234 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15235 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15236 | CPUMCTX_EXTRN_APIC_TPR);
15237 Assert(iCrReg < 16);
15238 Assert(iGReg < 16);
15239
15240 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15241 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15242 Assert(!pVCpu->iem.s.cActiveMappings);
15243 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15244}
15245
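/* Illustrative sketch (not part of the original sources): forwarding a decoded
 * "mov cr3, rax" intercept.  The control register and general purpose register
 * indexes are assumed to have been decoded from the exit qualification by the
 * caller.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr,
 *                                                        3,    // iCrReg: CR3
 *                                                        0);   // iGReg:  RAX
 */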
15246
15247/**
15248 * Interface for HM and EM to clear the CR0[TS] bit.
15249 *
15250 * @returns Strict VBox status code.
15251 * @param pVCpu The cross context virtual CPU structure.
15252 * @param cbInstr The instruction length in bytes.
15253 *
15254 * @remarks In ring-0 not all of the state needs to be synced in.
15255 */
15256VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15257{
15258 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15259
15260 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15261 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15262 Assert(!pVCpu->iem.s.cActiveMappings);
15263 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15264}
15265
15266
15267/**
15268 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15269 *
15270 * @returns Strict VBox status code.
15271 * @param pVCpu The cross context virtual CPU structure.
15272 * @param cbInstr The instruction length in bytes.
15273 * @param uValue The value to load into CR0.
15274 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15275 * memory operand. Otherwise pass NIL_RTGCPTR.
15276 *
15277 * @remarks In ring-0 not all of the state needs to be synced in.
15278 */
15279VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15280{
15281 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15282
15283 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15284 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15285 Assert(!pVCpu->iem.s.cActiveMappings);
15286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15287}
15288
15289
15290/**
15291 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15292 *
15293 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15294 *
15295 * @returns Strict VBox status code.
15296 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15297 * @param cbInstr The instruction length in bytes.
15298 * @remarks In ring-0 not all of the state needs to be synced in.
15299 * @thread EMT(pVCpu)
15300 */
15301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15302{
15303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15304
15305 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15306 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15307 Assert(!pVCpu->iem.s.cActiveMappings);
15308 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15309}
15310
15311
15312/**
15313 * Interface for HM and EM to emulate the WBINVD instruction.
15314 *
15315 * @returns Strict VBox status code.
15316 * @param pVCpu The cross context virtual CPU structure.
15317 * @param cbInstr The instruction length in bytes.
15318 *
15319 * @remarks In ring-0 not all of the state needs to be synced in.
15320 */
15321VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15322{
15323 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15324
15325 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15327 Assert(!pVCpu->iem.s.cActiveMappings);
15328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15329}
15330
15331
15332/**
15333 * Interface for HM and EM to emulate the INVD instruction.
15334 *
15335 * @returns Strict VBox status code.
15336 * @param pVCpu The cross context virtual CPU structure.
15337 * @param cbInstr The instruction length in bytes.
15338 *
15339 * @remarks In ring-0 not all of the state needs to be synced in.
15340 */
15341VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15342{
15343 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15344
15345 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15346 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15347 Assert(!pVCpu->iem.s.cActiveMappings);
15348 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15349}
15350
15351
15352/**
15353 * Interface for HM and EM to emulate the INVLPG instruction.
15354 *
15355 * @returns Strict VBox status code.
15356 * @retval VINF_PGM_SYNC_CR3
15357 *
15358 * @param pVCpu The cross context virtual CPU structure.
15359 * @param cbInstr The instruction length in bytes.
15360 * @param GCPtrPage The effective address of the page to invalidate.
15361 *
15362 * @remarks In ring-0 not all of the state needs to be synced in.
15363 */
15364VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15365{
15366 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15367
15368 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15369 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15370 Assert(!pVCpu->iem.s.cActiveMappings);
15371 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15372}
15373
15374
15375/**
15376 * Interface for HM and EM to emulate the CPUID instruction.
15377 *
15378 * @returns Strict VBox status code.
15379 *
15380 * @param pVCpu The cross context virtual CPU structure.
15381 * @param cbInstr The instruction length in bytes.
15382 *
15383 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15384 */
15385VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15386{
15387 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15388 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15389
15390 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15391 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15392 Assert(!pVCpu->iem.s.cActiveMappings);
15393 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15394}
15395
15396
15397/**
15398 * Interface for HM and EM to emulate the RDPMC instruction.
15399 *
15400 * @returns Strict VBox status code.
15401 *
15402 * @param pVCpu The cross context virtual CPU structure.
15403 * @param cbInstr The instruction length in bytes.
15404 *
15405 * @remarks Not all of the state needs to be synced in.
15406 */
15407VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15408{
15409 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15410 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15411
15412 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15413 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15414 Assert(!pVCpu->iem.s.cActiveMappings);
15415 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15416}
15417
15418
15419/**
15420 * Interface for HM and EM to emulate the RDTSC instruction.
15421 *
15422 * @returns Strict VBox status code.
15423 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15424 *
15425 * @param pVCpu The cross context virtual CPU structure.
15426 * @param cbInstr The instruction length in bytes.
15427 *
15428 * @remarks Not all of the state needs to be synced in.
15429 */
15430VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15431{
15432 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15433 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15434
15435 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15436 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15437 Assert(!pVCpu->iem.s.cActiveMappings);
15438 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15439}
15440
15441
15442/**
15443 * Interface for HM and EM to emulate the RDTSCP instruction.
15444 *
15445 * @returns Strict VBox status code.
15446 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15447 *
15448 * @param pVCpu The cross context virtual CPU structure.
15449 * @param cbInstr The instruction length in bytes.
15450 *
15451 * @remarks Not all of the state needs to be synced in. Recommended
15452 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15453 */
15454VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15455{
15456 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15457 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15458
15459 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15460 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15461 Assert(!pVCpu->iem.s.cActiveMappings);
15462 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15463}
15464
15465
15466/**
15467 * Interface for HM and EM to emulate the RDMSR instruction.
15468 *
15469 * @returns Strict VBox status code.
15470 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15471 *
15472 * @param pVCpu The cross context virtual CPU structure.
15473 * @param cbInstr The instruction length in bytes.
15474 *
15475 * @remarks Not all of the state needs to be synced in. Requires RCX and
15476 * (currently) all MSRs.
15477 */
15478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15479{
15480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15481 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15482
15483 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15484 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15485 Assert(!pVCpu->iem.s.cActiveMappings);
15486 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15487}
15488
15489
15490/**
15491 * Interface for HM and EM to emulate the WRMSR instruction.
15492 *
15493 * @returns Strict VBox status code.
15494 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15495 *
15496 * @param pVCpu The cross context virtual CPU structure.
15497 * @param cbInstr The instruction length in bytes.
15498 *
15499 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15500 * and (currently) all MSRs.
15501 */
15502VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15503{
15504 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15505 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15506 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15507
15508 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15509 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15510 Assert(!pVCpu->iem.s.cActiveMappings);
15511 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15512}
15513
15514
15515/**
15516 * Interface for HM and EM to emulate the MONITOR instruction.
15517 *
15518 * @returns Strict VBox status code.
15519 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15520 *
15521 * @param pVCpu The cross context virtual CPU structure.
15522 * @param cbInstr The instruction length in bytes.
15523 *
15524 * @remarks Not all of the state needs to be synced in.
15525 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15526 * are used.
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15529{
15530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15531 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15532
15533 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15534 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15535 Assert(!pVCpu->iem.s.cActiveMappings);
15536 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15537}
15538
15539
15540/**
15541 * Interface for HM and EM to emulate the MWAIT instruction.
15542 *
15543 * @returns Strict VBox status code.
15544 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15545 *
15546 * @param pVCpu The cross context virtual CPU structure.
15547 * @param cbInstr The instruction length in bytes.
15548 *
15549 * @remarks Not all of the state needs to be synced in.
15550 */
15551VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15552{
15553 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15554
15555 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15556 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15557 Assert(!pVCpu->iem.s.cActiveMappings);
15558 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15559}
15560
15561
15562/**
15563 * Interface for HM and EM to emulate the HLT instruction.
15564 *
15565 * @returns Strict VBox status code.
15566 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15567 *
15568 * @param pVCpu The cross context virtual CPU structure.
15569 * @param cbInstr The instruction length in bytes.
15570 *
15571 * @remarks Not all of the state needs to be synced in.
15572 */
15573VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15574{
15575 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15576
15577 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15578 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15579 Assert(!pVCpu->iem.s.cActiveMappings);
15580 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15581}
15582
15583
15584/**
15585 * Checks if IEM is in the process of delivering an event (interrupt or
15586 * exception).
15587 *
15588 * @returns true if we're in the process of raising an interrupt or exception,
15589 * false otherwise.
15590 * @param pVCpu The cross context virtual CPU structure.
15591 * @param puVector Where to store the vector associated with the
15592 * currently delivered event, optional.
15593 * @param pfFlags Where to store the event delivery flags (see
15594 * IEM_XCPT_FLAGS_XXX), optional.
15595 * @param puErr Where to store the error code associated with the
15596 * event, optional.
15597 * @param puCr2 Where to store the CR2 associated with the event,
15598 * optional.
15599 * @remarks The caller should check the flags to determine if the error code and
15600 * CR2 are valid for the event.
15601 */
15602VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15603{
15604 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15605 if (fRaisingXcpt)
15606 {
15607 if (puVector)
15608 *puVector = pVCpu->iem.s.uCurXcpt;
15609 if (pfFlags)
15610 *pfFlags = pVCpu->iem.s.fCurXcpt;
15611 if (puErr)
15612 *puErr = pVCpu->iem.s.uCurXcptErr;
15613 if (puCr2)
15614 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15615 }
15616 return fRaisingXcpt;
15617}
15618
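/* Illustrative sketch (not part of the original sources): querying IEM for an
 * event that is currently being delivered, e.g. when an exit interrupts the
 * delivery and the caller needs to record the pending event.  The flag checks
 * mirror the IEM_XCPT_FLAGS_XXX usage seen in IEMInjectTrap above.
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags, uErrCode;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *      {
 *          bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          // ... stash uVector/uErrCode/uCr2 for later re-injection ...
 *      }
 */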
15619#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15620
15621/**
15622 * Interface for HM and EM to emulate the CLGI instruction.
15623 *
15624 * @returns Strict VBox status code.
15625 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15626 * @param cbInstr The instruction length in bytes.
15627 * @thread EMT(pVCpu)
15628 */
15629VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15630{
15631 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15632
15633 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15634 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15635 Assert(!pVCpu->iem.s.cActiveMappings);
15636 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15637}
15638
15639
15640/**
15641 * Interface for HM and EM to emulate the STGI instruction.
15642 *
15643 * @returns Strict VBox status code.
15644 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15645 * @param cbInstr The instruction length in bytes.
15646 * @thread EMT(pVCpu)
15647 */
15648VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15649{
15650 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15651
15652 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15653 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15654 Assert(!pVCpu->iem.s.cActiveMappings);
15655 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15656}
15657
15658
15659/**
15660 * Interface for HM and EM to emulate the VMLOAD instruction.
15661 *
15662 * @returns Strict VBox status code.
15663 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15664 * @param cbInstr The instruction length in bytes.
15665 * @thread EMT(pVCpu)
15666 */
15667VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15668{
15669 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15670
15671 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15673 Assert(!pVCpu->iem.s.cActiveMappings);
15674 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15675}
15676
15677
15678/**
15679 * Interface for HM and EM to emulate the VMSAVE instruction.
15680 *
15681 * @returns Strict VBox status code.
15682 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15683 * @param cbInstr The instruction length in bytes.
15684 * @thread EMT(pVCpu)
15685 */
15686VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15687{
15688 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15689
15690 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15691 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15692 Assert(!pVCpu->iem.s.cActiveMappings);
15693 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15694}
15695
15696
15697/**
15698 * Interface for HM and EM to emulate the INVLPGA instruction.
15699 *
15700 * @returns Strict VBox status code.
15701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15702 * @param cbInstr The instruction length in bytes.
15703 * @thread EMT(pVCpu)
15704 */
15705VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15706{
15707 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15708
15709 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15710 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15711 Assert(!pVCpu->iem.s.cActiveMappings);
15712 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15713}
15714
15715
15716/**
15717 * Interface for HM and EM to emulate the VMRUN instruction.
15718 *
15719 * @returns Strict VBox status code.
15720 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15721 * @param cbInstr The instruction length in bytes.
15722 * @thread EMT(pVCpu)
15723 */
15724VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15725{
15726 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15727 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15728
15729 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15730 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15731 Assert(!pVCpu->iem.s.cActiveMappings);
15732 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15733}
15734
15735
15736/**
15737 * Interface for HM and EM to emulate \#VMEXIT.
15738 *
15739 * @returns Strict VBox status code.
15740 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15741 * @param uExitCode The exit code.
15742 * @param uExitInfo1 The exit info. 1 field.
15743 * @param uExitInfo2 The exit info. 2 field.
15744 * @thread EMT(pVCpu)
15745 */
15746VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15747{
15748 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15749 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15750 if (pVCpu->iem.s.cActiveMappings)
15751 iemMemRollback(pVCpu);
15752 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15753}
15754
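/* Illustrative sketch (not part of the original sources): a nested-SVM aware
 * exit handler forcing a #VMEXIT on behalf of the outer guest hypervisor.  The
 * exit code and the two exit-info fields are assumed to have been determined
 * from the intercept that fired.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          // Control has been transferred back to the outer guest hypervisor.
 *      }
 */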
15755#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15756
15757#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15758
15759/**
15760 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15761 *
15762 * @returns Strict VBox status code.
15763 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15764 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15765 * the x2APIC device.
15766 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15767 *
15768 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15769 * @param idMsr The MSR being read.
15770 * @param pu64Value Pointer to the value being written or where to store the
15771 * value being read.
15772 * @param fWrite Whether this is an MSR write or read access.
15773 * @thread EMT(pVCpu)
15774 */
15775VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15776{
15777 Assert(pu64Value);
15778
15779 VBOXSTRICTRC rcStrict;
15780 if (!fWrite)
15781 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15782 else
15783 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15784 if (pVCpu->iem.s.cActiveMappings)
15785 iemMemRollback(pVCpu);
15786 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15787
15788}
15789
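/* Illustrative sketch (not part of the original sources): virtualizing an
 * x2APIC MSR read with the interface above.  For reads the value is returned
 * through pu64Value; for writes the caller places the value there before the
 * call.  idMsr is assumed to have been taken from the guest's ECX at the time
 * of the exit.
 *
 *      uint64_t u64Value = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &u64Value,
 *                                                          false);   // fWrite = false: a read
 *      if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
 *      {
 *          // u64Value now holds the virtualized MSR value.
 *      }
 *      else if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
 *      {
 *          // Let the regular x2APIC device handle the access.
 *      }
 */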
15790
15791/**
15792 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15793 *
15794 * @returns Strict VBox status code.
15795 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15796 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15797 *
15798 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15799 * @param offAccess The offset of the register being accessed (within the
15800 * APIC-access page).
15801 * @param cbAccess The size of the access in bytes.
15802 * @param pvData Pointer to the data being written or where to store the data
15803 * being read.
15804 * @param fWrite Whether this is a write or read access.
15805 * @thread EMT(pVCpu)
15806 */
15807VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15808 bool fWrite)
15809{
15810 Assert(pvData);
15811
15812 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15813 * accesses, so we only use read/write here. Maybe in the future the PGM
15814 * physical handler will be extended to include this information? */
15815 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15816 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15817 if (pVCpu->iem.s.cActiveMappings)
15818 iemMemRollback(pVCpu);
15819 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15820}
15821
15822
15823/**
15824 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15825 * VM-exit.
15826 *
15827 * @returns Strict VBox status code.
15828 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15829 * @thread EMT(pVCpu)
15830 */
15831VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15832{
15833 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15834 if (pVCpu->iem.s.cActiveMappings)
15835 iemMemRollback(pVCpu);
15836 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15837}
15838
15839
15840/**
15841 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15842 *
15843 * @returns Strict VBox status code.
15844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15845 * @thread EMT(pVCpu)
15846 */
15847VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15848{
15849 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15850 if (pVCpu->iem.s.cActiveMappings)
15851 iemMemRollback(pVCpu);
15852 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15853}
15854
15855
15856/**
15857 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15858 *
15859 * @returns Strict VBox status code.
15860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15861 * @param uVector The external interrupt vector (pass 0 if the external
15862 * interrupt is still pending).
15863 * @param fIntPending Whether the external interrupt is pending or
15864 * acknowledged in the interrupt controller.
15865 * @thread EMT(pVCpu)
15866 */
15867VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15868{
15869 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15870 if (pVCpu->iem.s.cActiveMappings)
15871 iemMemRollback(pVCpu);
15872 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15873}
15874
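/* Illustrative sketch (not part of the original sources): signalling an
 * external-interrupt VM-exit.  Per the interface above, a still-pending
 * interrupt is reported with fIntPending = true and uVector = 0; once the
 * vector has been acknowledged in the interrupt controller it is passed with
 * fIntPending = false.
 *
 *      VBOXSTRICTRC rcStrict;
 *      if (fStillPending)
 *          rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0, true);          // not yet acknowledged
 *      else
 *          rcStrict = IEMExecVmxVmexitExtInt(pVCpu, uVector, false);   // vector already fetched
 */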
15875
15876/**
15877 * Interface for HM and EM to emulate VM-exit due to NMIs.
15878 *
15879 * @returns Strict VBox status code.
15880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15881 * @thread EMT(pVCpu)
15882 */
15883VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmi(PVMCPU pVCpu)
15884{
15885 VBOXSTRICTRC rcStrict = iemVmxVmexitNmi(pVCpu);
15886 if (pVCpu->iem.s.cActiveMappings)
15887 iemMemRollback(pVCpu);
15888 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15889}
15890
15891
15892/**
15893 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15894 *
15895 * @returns Strict VBox status code.
15896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15897 * @param uVector The SIPI vector.
15898 * @thread EMT(pVCpu)
15899 */
15900VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15901{
15902 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15903 if (pVCpu->iem.s.cActiveMappings)
15904 iemMemRollback(pVCpu);
15905 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15906}
15907
15908
15909/**
15910 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15911 *
15912 * @returns Strict VBox status code.
15913 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15914 * @thread EMT(pVCpu)
15915 */
15916VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15917{
15918 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15919 if (pVCpu->iem.s.cActiveMappings)
15920 iemMemRollback(pVCpu);
15921 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15922}
15923
15924
15925/**
15926 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15927 *
15928 * @returns Strict VBox status code.
15929 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15930 * @thread EMT(pVCpu)
15931 */
15932VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15933{
15934 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15935 if (pVCpu->iem.s.cActiveMappings)
15936 iemMemRollback(pVCpu);
15937 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15938}
15939
15940
15941/**
15942 * Interface for HM and EM to emulate VM-exits for NMI-windows.
15943 *
15944 * @returns Strict VBox status code.
15945 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15946 * @thread EMT(pVCpu)
15947 */
15948VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitNmiWindow(PVMCPU pVCpu)
15949{
15950 VBOXSTRICTRC rcStrict = iemVmxVmexitNmiWindow(pVCpu);
15951 if (pVCpu->iem.s.cActiveMappings)
15952 iemMemRollback(pVCpu);
15953 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15954}
15955
15956
15957/**
15958 * Interface for HM and EM to emulate VM-exits due to the Monitor-Trap Flag (MTF).
15959 *
15960 * @returns Strict VBox status code.
15961 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15962 * @thread EMT(pVCpu)
15963 */
15964VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15965{
15966 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15967 if (pVCpu->iem.s.cActiveMappings)
15968 iemMemRollback(pVCpu);
15969 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15970}
15971
15972
15973/**
15974 * Interface for HM and EM to emulate the VMREAD instruction.
15975 *
15976 * @returns Strict VBox status code.
15977 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15978 * @param pExitInfo Pointer to the VM-exit information struct.
15979 * @thread EMT(pVCpu)
15980 */
15981VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15982{
15983 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15984 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15985 Assert(pExitInfo);
15986
15987 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15988
15989 VBOXSTRICTRC rcStrict;
15990 uint8_t const cbInstr = pExitInfo->cbInstr;
15991 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15992 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15993 {
15994 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15995 {
15996 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15997 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15998 }
15999 else
16000 {
16001 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16002 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
16003 }
16004 }
16005 else
16006 {
16007 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
16008 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16009 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16010 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
16011 }
16012 Assert(!pVCpu->iem.s.cActiveMappings);
16013 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16014}
16015
16016
16017/**
16018 * Interface for HM and EM to emulate the VMWRITE instruction.
16019 *
16020 * @returns Strict VBox status code.
16021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16022 * @param pExitInfo Pointer to the VM-exit information struct.
16023 * @thread EMT(pVCpu)
16024 */
16025VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16026{
16027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16028 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16029 Assert(pExitInfo);
16030
16031 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16032
16033 uint64_t u64Val;
16034 uint8_t iEffSeg;
16035 IEMMODE enmEffAddrMode;
16036 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16037 {
16038 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16039 iEffSeg = UINT8_MAX;
16040 enmEffAddrMode = UINT8_MAX;
16041 }
16042 else
16043 {
16044 u64Val = pExitInfo->GCPtrEffAddr;
16045 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16046 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
16047 }
16048 uint8_t const cbInstr = pExitInfo->cbInstr;
16049 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16050 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16051 Assert(!pVCpu->iem.s.cActiveMappings);
16052 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16053}
16054
16055
16056/**
16057 * Interface for HM and EM to emulate the VMPTRLD instruction.
16058 *
16059 * @returns Strict VBox status code.
16060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16061 * @param pExitInfo Pointer to the VM-exit information struct.
16062 * @thread EMT(pVCpu)
16063 */
16064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16065{
16066 Assert(pExitInfo);
16067 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16068 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16069
16070 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16071
16072 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16073 uint8_t const cbInstr = pExitInfo->cbInstr;
16074 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16075 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16076 Assert(!pVCpu->iem.s.cActiveMappings);
16077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16078}
16079
16080
16081/**
16082 * Interface for HM and EM to emulate the VMPTRST instruction.
16083 *
16084 * @returns Strict VBox status code.
16085 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16086 * @param pExitInfo Pointer to the VM-exit information struct.
16087 * @thread EMT(pVCpu)
16088 */
16089VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16090{
16091 Assert(pExitInfo);
16092 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16093 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16094
16095 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16096
16097 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16098 uint8_t const cbInstr = pExitInfo->cbInstr;
16099 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16100 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16101 Assert(!pVCpu->iem.s.cActiveMappings);
16102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16103}
16104
16105
16106/**
16107 * Interface for HM and EM to emulate the VMCLEAR instruction.
16108 *
16109 * @returns Strict VBox status code.
16110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16111 * @param pExitInfo Pointer to the VM-exit information struct.
16112 * @thread EMT(pVCpu)
16113 */
16114VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16115{
16116 Assert(pExitInfo);
16117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16118 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16119
16120 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16121
16122 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16123 uint8_t const cbInstr = pExitInfo->cbInstr;
16124 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16125 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16126 Assert(!pVCpu->iem.s.cActiveMappings);
16127 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16128}
16129
16130
16131/**
16132 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16133 *
16134 * @returns Strict VBox status code.
16135 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16136 * @param cbInstr The instruction length in bytes.
16137 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16138 * VMXINSTRID_VMRESUME).
16139 * @thread EMT(pVCpu)
16140 */
16141VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16142{
16143 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16144 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16145
16146 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16147 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16148 Assert(!pVCpu->iem.s.cActiveMappings);
16149 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16150}


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param pExitInfo Pointer to the VM-exit information struct.
 * @thread EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param cbInstr The instruction length in bytes.
 * @thread EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled; the caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
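

/*
 * Worked example (illustrative only) of the address decomposition done in the
 * handler above, assuming the architectural 4 KiB page size and an APIC-access
 * page placed at 0xfee00000 by the nested hypervisor:
 *
 *     RTGCPHYS const GCPhysFault      = UINT64_C(0xfee003f0);                        // faulting guest-physical address
 *     RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;   // 0xfee00000
 *     uint16_t const offAccess        = GCPhysFault & PAGE_OFFSET_MASK;              // 0x3f0
 *
 * The page offset (not the full physical address) is what gets passed on to
 * iemVmxVirtApicAccessMem() above.
 */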

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param rcStrict Current EM status code.
 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
 *                       with @a rcStrict.
 * @param iMemMap The memory mapping index. For error reporting only.
 * @param pVCpu The cross context virtual CPU structure of the calling
 *              thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}
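

/*
 * Illustrative only: how the slow path above resolves the awkward cases
 * (iMemMap and pVCpu as in the caller):
 *
 *     iemR3MergeStatusSlow(VERR_PGM_PHYS_WR_HIT_HANDLER, VINF_EM_RESCHEDULE, iMemMap, pVCpu); // -> VERR_PGM_PHYS_WR_HIT_HANDLER (a failed rcStrict wins)
 *     iemR3MergeStatusSlow(VINF_EM_RESCHEDULE, VERR_NO_MEMORY, iMemMap, pVCpu);               // -> VERR_NO_MEMORY (a failed commit status wins next)
 *
 * Equal informational statuses are passed through as-is; anything else
 * triggers the assertion and is folded into VERR_IOM_FF_STATUS_IPE.
 */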


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param rcStrict Current EM status code.
 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
 *                       with @a rcStrict.
 * @param iMemMap The memory mapping index. For error reporting only.
 * @param pVCpu The cross context virtual CPU structure of the calling
 *              thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
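

/*
 * Illustrative only: the common fast-path outcomes of the merge above
 * (iMemMap and pVCpu as in the caller):
 *
 *     iemR3MergeStatus(VINF_SUCCESS,       VINF_EM_RAW_TO_R3, iMemMap, pVCpu); // -> VINF_EM_RAW_TO_R3 (commit status wins outright)
 *     iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS,      iMemMap, pVCpu); // -> VINF_EM_RESCHEDULE (successful commit keeps the EM status)
 *
 * When both are EM scheduling codes the numerically smaller, i.e. higher
 * priority, one is returned; everything else is punted to iemR3MergeStatusSlow().
 */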


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param rcStrict The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
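

/*
 * Illustrative only: a minimal sketch of the ring-3 caller side.  After
 * returning to ring-3 from ring-0/raw-mode execution, the force-flag
 * processing code would do something along these lines (the surrounding
 * loop and variable names are assumptions, not the actual EM code):
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 *
 * The returned status replaces the one handed in, so a write-commit failure
 * or higher-priority EM scheduling request is not lost.
 */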

#endif /* IN_RING3 */
