VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@77382

Last change on this file since 77382 was 77382, checked in by vboxsync, 6 years ago

VMM: Further improvements to the IEM timer polling, making it fully configurable. Side effect is that EMSTATE_IEM_THEN_REM has now become more accurate and will execute fewer instructions (exactly 1024 rather than up to 1023+4096) before switching to REM. [burn fix?]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 646.2 KB
1/* $Id: IEMAll.cpp 77382 2019-02-20 13:44:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
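
/*
 * Illustrative sketch (hypothetical, for orientation only): how the levels
 * listed above map onto the logging macros used throughout IEM.  The
 * statements below are made-up examples, not code from this file.
 *
 *      LogFlow(("IEMExecOne: enter\n"));                       // Flow:    enter/exit info.
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));        // Level 1: exceptions and such.
 *      Log4(("decode - %04x:%08RX64 xor eax, eax\n", uSel, uRip)); // Level 4: mnemonics w/ EIP.
 *      Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));        // Level 8: memory writes.
 */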
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#include <VBox/vmm/vm.h>
118#include <VBox/log.h>
119#include <VBox/err.h>
120#include <VBox/param.h>
121#include <VBox/dis.h>
122#include <VBox/disopcode.h>
123#include <iprt/asm-math.h>
124#include <iprt/assert.h>
125#include <iprt/string.h>
126#include <iprt/x86.h>
127
128
129/*********************************************************************************************************************************
130* Structures and Typedefs *
131*********************************************************************************************************************************/
132/** @typedef PFNIEMOP
133 * Pointer to an opcode decoder function.
134 */
135
136/** @def FNIEMOP_DEF
137 * Define an opcode decoder function.
138 *
139 * We're using macros for this so that adding and removing parameters as well as
140 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
141 *
142 * @param a_Name The function name.
143 */
144
145/** @typedef PFNIEMOPRM
146 * Pointer to an opcode decoder function with RM byte.
147 */
148
149/** @def FNIEMOPRM_DEF
150 * Define an opcode decoder function with RM byte.
151 *
152 * We're using macros for this so that adding and removing parameters as well as
153 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
154 *
155 * @param a_Name The function name.
156 */
157
158#if defined(__GNUC__) && defined(RT_ARCH_X86)
159typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
160typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
161# define FNIEMOP_DEF(a_Name) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
163# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
164 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
165# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
167
168#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
169typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
170typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
171# define FNIEMOP_DEF(a_Name) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
173# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
174 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
175# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
177
178#elif defined(__GNUC__)
179typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
180typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
181# define FNIEMOP_DEF(a_Name) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
183# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
184 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
185# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
187
188#else
189typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
190typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
191# define FNIEMOP_DEF(a_Name) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
193# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
194 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
195# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
197
198#endif
199#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
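
/*
 * Illustrative sketch (hypothetical, not used by the build): how a decoder
 * function is typically defined with FNIEMOP_DEF and dispatched with
 * FNIEMOP_CALL, so that parameters and calling-convention attributes can be
 * changed in one place.  The function name and body below are invented for
 * illustration only.
 *
 *      FNIEMOP_DEF(iemOp_ExampleNop)
 *      {
 *          // ... decode any further bytes, then perform/queue the operation ...
 *          return VINF_SUCCESS;
 *      }
 *
 *      // In the decoder loop, dispatching on the first opcode byte b:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */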
200
201
202/**
203 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
204 */
205typedef union IEMSELDESC
206{
207 /** The legacy view. */
208 X86DESC Legacy;
209 /** The long mode view. */
210 X86DESC64 Long;
211} IEMSELDESC;
212/** Pointer to a selector descriptor table entry. */
213typedef IEMSELDESC *PIEMSELDESC;
214
215/**
216 * CPU exception classes.
217 */
218typedef enum IEMXCPTCLASS
219{
220 IEMXCPTCLASS_BENIGN,
221 IEMXCPTCLASS_CONTRIBUTORY,
222 IEMXCPTCLASS_PAGE_FAULT,
223 IEMXCPTCLASS_DOUBLE_FAULT
224} IEMXCPTCLASS;
225
226
227/*********************************************************************************************************************************
228* Defined Constants And Macros *
229*********************************************************************************************************************************/
230/** @def IEM_WITH_SETJMP
231 * Enables alternative status code handling using setjmps.
232 *
233 * This adds a bit of expense via the setjmp() call since it saves all the
234 * non-volatile registers. However, it eliminates return code checks and allows
235 * for more optimal return value passing (return regs instead of stack buffer).
236 */
237#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
238# define IEM_WITH_SETJMP
239#endif
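
/*
 * Rough sketch (simplified and hypothetical) of the control flow IEM_WITH_SETJMP
 * enables: the outermost executor arms a jump buffer and the raise/memory
 * helpers longjmp back to it on failure, so the common path never checks return
 * codes.  The real code keeps the buffer in the IEMCPU state and wraps it in
 * dedicated macros; the plain setjmp/longjmp below only shows the idea.
 *
 *      jmp_buf JmpBuf;
 *      int rc = setjmp(JmpBuf);
 *      if (rc == 0)
 *          rcStrict = iemExecOneInstruction(pVCpu);    // hypothetical inner call
 *      else
 *          rcStrict = rc;                              // delivered via longjmp(JmpBuf, rc)
 */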
240
241/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
242 * due to GCC lacking knowledge about the value range of a switch. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
244
245/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
246#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
247
248/**
249 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
250 * occasion.
251 */
252#ifdef LOG_ENABLED
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 do { \
255 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
257 } while (0)
258#else
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
261#endif
262
263/**
264 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
265 * occasion using the supplied logger statement.
266 *
267 * @param a_LoggerArgs What to log on failure.
268 */
269#ifdef LOG_ENABLED
270# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
271 do { \
272 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
273 /*LogFunc(a_LoggerArgs);*/ \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
275 } while (0)
276#else
277# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
278 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
279#endif
280
281/**
282 * Call an opcode decoder function.
283 *
284 * We're using macros for this so that adding and removing parameters can be
285 * done as we please. See FNIEMOP_DEF.
286 */
287#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
288
289/**
290 * Call a common opcode decoder function taking one extra argument.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF_1.
294 */
295#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
296
297/**
298 * Call a common opcode decoder function taking two extra arguments.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_2.
302 */
303#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
304
305/**
306 * Check if we're currently executing in real or virtual 8086 mode.
307 *
308 * @returns @c true if it is, @c false if not.
309 * @param a_pVCpu The IEM state of the current CPU.
310 */
311#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
312
313/**
314 * Check if we're currently executing in virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
318 */
319#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in long mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in a 64-bit code segment.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
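
/*
 * For example (illustration only): with IEM_USE_UNALIGNED_DATA_ACCESS a 32-bit
 * fetch from an opcode/data buffer can be a single, possibly unaligned load,
 *
 *      uint32_t u32 = *(uint32_t const *)&pbBuf[off];
 *
 * whereas without it the bytes would be assembled individually, e.g.:
 *
 *      uint32_t u32 = RT_MAKE_U32_FROM_U8(pbBuf[off], pbBuf[off + 1], pbBuf[off + 2], pbBuf[off + 3]);
 */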
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402/**
403 * Check if the nested-guest has the given Pin-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
406 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
407
408/**
409 * Check if the nested-guest has the given Processor-based VM-execution control set.
410 */
411#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
412 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
413
414/**
415 * Check if the nested-guest has the given Secondary Processor-based VM-execution
416 * control set.
417 */
418#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
419 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept.
423 */
424# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
425 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
426
427/**
428 * Invokes the VMX VM-exit handler for an instruction intercept where the
429 * instruction provides additional VM-exit information.
430 */
431# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
432 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for a task switch.
436 */
437# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
438 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for MWAIT.
442 */
443# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
444 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for triple faults.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
450 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463
464#endif
465
466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
467/**
468 * Check if an SVM control/instruction intercept is set.
469 */
470# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
471 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
472
473/**
474 * Check if an SVM read CRx intercept is set.
475 */
476# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM write CRx intercept is set.
481 */
482# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
483 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
484
485/**
486 * Check if an SVM read DRx intercept is set.
487 */
488# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM write DRx intercept is set.
493 */
494# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
495 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
501 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
502
503/**
504 * Invokes the SVM \#VMEXIT handler for the nested-guest.
505 */
506# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
507 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
508
509/**
510 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
511 * corresponding decode assist information.
512 */
513# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
514 do \
515 { \
516 uint64_t uExitInfo1; \
517 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
518 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
519 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
520 else \
521 uExitInfo1 = 0; \
522 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
523 } while (0)
524
525/** Checks and handles an SVM nested-guest instruction intercept and updates
526 * the NRIP if needed.
527 */
528# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
529 do \
530 { \
531 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
532 { \
533 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
534 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
535 } \
536 } while (0)
537
538/** Checks and handles SVM nested-guest CR0 read intercept. */
539# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
540 do \
541 { \
542 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
543 { /* probably likely */ } \
544 else \
545 { \
546 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
547 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
548 } \
549 } while (0)
550
551/**
552 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
553 */
554# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
555 do { \
556 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
557 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
558 } while (0)
559
560#else
561# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
562# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
563# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
565# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
567# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
568# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
570# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
572
573#endif
574
575
576/*********************************************************************************************************************************
577* Global Variables *
578*********************************************************************************************************************************/
579extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
580
581
582/** Function table for the ADD instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
584{
585 iemAImpl_add_u8, iemAImpl_add_u8_locked,
586 iemAImpl_add_u16, iemAImpl_add_u16_locked,
587 iemAImpl_add_u32, iemAImpl_add_u32_locked,
588 iemAImpl_add_u64, iemAImpl_add_u64_locked
589};
590
591/** Function table for the ADC instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
593{
594 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
595 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
596 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
597 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
598};
599
600/** Function table for the SUB instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
602{
603 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
604 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
605 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
606 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
607};
608
609/** Function table for the SBB instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
611{
612 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
613 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
614 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
615 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
616};
617
618/** Function table for the OR instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
620{
621 iemAImpl_or_u8, iemAImpl_or_u8_locked,
622 iemAImpl_or_u16, iemAImpl_or_u16_locked,
623 iemAImpl_or_u32, iemAImpl_or_u32_locked,
624 iemAImpl_or_u64, iemAImpl_or_u64_locked
625};
626
627/** Function table for the XOR instruction. */
628IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
629{
630 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
631 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
632 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
633 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
634};
635
636/** Function table for the AND instruction. */
637IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
638{
639 iemAImpl_and_u8, iemAImpl_and_u8_locked,
640 iemAImpl_and_u16, iemAImpl_and_u16_locked,
641 iemAImpl_and_u32, iemAImpl_and_u32_locked,
642 iemAImpl_and_u64, iemAImpl_and_u64_locked
643};
644
645/** Function table for the CMP instruction.
646 * @remarks Making operand order ASSUMPTIONS.
647 */
648IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
649{
650 iemAImpl_cmp_u8, NULL,
651 iemAImpl_cmp_u16, NULL,
652 iemAImpl_cmp_u32, NULL,
653 iemAImpl_cmp_u64, NULL
654};
655
656/** Function table for the TEST instruction.
657 * @remarks Making operand order ASSUMPTIONS.
658 */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
660{
661 iemAImpl_test_u8, NULL,
662 iemAImpl_test_u16, NULL,
663 iemAImpl_test_u32, NULL,
664 iemAImpl_test_u64, NULL
665};
666
667/** Function table for the BT instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
669{
670 NULL, NULL,
671 iemAImpl_bt_u16, NULL,
672 iemAImpl_bt_u32, NULL,
673 iemAImpl_bt_u64, NULL
674};
675
676/** Function table for the BTC instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
678{
679 NULL, NULL,
680 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
681 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
682 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
683};
684
685/** Function table for the BTR instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
687{
688 NULL, NULL,
689 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
690 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
691 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
692};
693
694/** Function table for the BTS instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
696{
697 NULL, NULL,
698 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
699 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
700 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
701};
702
703/** Function table for the BSF instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
705{
706 NULL, NULL,
707 iemAImpl_bsf_u16, NULL,
708 iemAImpl_bsf_u32, NULL,
709 iemAImpl_bsf_u64, NULL
710};
711
712/** Function table for the BSR instruction. */
713IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
714{
715 NULL, NULL,
716 iemAImpl_bsr_u16, NULL,
717 iemAImpl_bsr_u32, NULL,
718 iemAImpl_bsr_u64, NULL
719};
720
721/** Function table for the IMUL instruction. */
722IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
723{
724 NULL, NULL,
725 iemAImpl_imul_two_u16, NULL,
726 iemAImpl_imul_two_u32, NULL,
727 iemAImpl_imul_two_u64, NULL
728};
729
730/** Group 1 /r lookup table. */
731IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
732{
733 &g_iemAImpl_add,
734 &g_iemAImpl_or,
735 &g_iemAImpl_adc,
736 &g_iemAImpl_sbb,
737 &g_iemAImpl_and,
738 &g_iemAImpl_sub,
739 &g_iemAImpl_xor,
740 &g_iemAImpl_cmp
741};
742
743/** Function table for the INC instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
745{
746 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
747 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
748 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
749 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
750};
751
752/** Function table for the DEC instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
754{
755 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
756 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
757 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
758 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
759};
760
761/** Function table for the NEG instruction. */
762IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
763{
764 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
765 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
766 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
767 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
768};
769
770/** Function table for the NOT instruction. */
771IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
772{
773 iemAImpl_not_u8, iemAImpl_not_u8_locked,
774 iemAImpl_not_u16, iemAImpl_not_u16_locked,
775 iemAImpl_not_u32, iemAImpl_not_u32_locked,
776 iemAImpl_not_u64, iemAImpl_not_u64_locked
777};
778
779
780/** Function table for the ROL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
782{
783 iemAImpl_rol_u8,
784 iemAImpl_rol_u16,
785 iemAImpl_rol_u32,
786 iemAImpl_rol_u64
787};
788
789/** Function table for the ROR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
791{
792 iemAImpl_ror_u8,
793 iemAImpl_ror_u16,
794 iemAImpl_ror_u32,
795 iemAImpl_ror_u64
796};
797
798/** Function table for the RCL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
800{
801 iemAImpl_rcl_u8,
802 iemAImpl_rcl_u16,
803 iemAImpl_rcl_u32,
804 iemAImpl_rcl_u64
805};
806
807/** Function table for the RCR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
809{
810 iemAImpl_rcr_u8,
811 iemAImpl_rcr_u16,
812 iemAImpl_rcr_u32,
813 iemAImpl_rcr_u64
814};
815
816/** Function table for the SHL instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
818{
819 iemAImpl_shl_u8,
820 iemAImpl_shl_u16,
821 iemAImpl_shl_u32,
822 iemAImpl_shl_u64
823};
824
825/** Function table for the SHR instruction. */
826IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
827{
828 iemAImpl_shr_u8,
829 iemAImpl_shr_u16,
830 iemAImpl_shr_u32,
831 iemAImpl_shr_u64
832};
833
834/** Function table for the SAR instruction. */
835IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
836{
837 iemAImpl_sar_u8,
838 iemAImpl_sar_u16,
839 iemAImpl_sar_u32,
840 iemAImpl_sar_u64
841};
842
843
844/** Function table for the MUL instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
846{
847 iemAImpl_mul_u8,
848 iemAImpl_mul_u16,
849 iemAImpl_mul_u32,
850 iemAImpl_mul_u64
851};
852
853/** Function table for the IMUL instruction working implicitly on rAX. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
855{
856 iemAImpl_imul_u8,
857 iemAImpl_imul_u16,
858 iemAImpl_imul_u32,
859 iemAImpl_imul_u64
860};
861
862/** Function table for the DIV instruction. */
863IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
864{
865 iemAImpl_div_u8,
866 iemAImpl_div_u16,
867 iemAImpl_div_u32,
868 iemAImpl_div_u64
869};
870
871/** Function table for the IDIV instruction. */
872IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
873{
874 iemAImpl_idiv_u8,
875 iemAImpl_idiv_u16,
876 iemAImpl_idiv_u32,
877 iemAImpl_idiv_u64
878};
879
880/** Function table for the SHLD instruction */
881IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
882{
883 iemAImpl_shld_u16,
884 iemAImpl_shld_u32,
885 iemAImpl_shld_u64,
886};
887
888/** Function table for the SHRD instruction */
889IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
890{
891 iemAImpl_shrd_u16,
892 iemAImpl_shrd_u32,
893 iemAImpl_shrd_u64,
894};
895
896
897/** Function table for the PUNPCKLBW instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
899/** Function table for the PUNPCKLWD instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
901/** Function table for the PUNPCKLDQ instruction */
902IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
903/** Function table for the PUNPCKLQDQ instruction */
904IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
905
906/** Function table for the PUNPCKHBW instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
908/** Function table for the PUNPCKHWD instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
910/** Function table for the PUNPCKHDQ instruction */
911IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
912/** Function table for the PUNPCKHQDQ instruction */
913IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
914
915/** Function table for the PXOR instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
917/** Function table for the PCMPEQB instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
919/** Function table for the PCMPEQW instruction */
920IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
921/** Function table for the PCMPEQD instruction */
922IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
923
924
925#if defined(IEM_LOG_MEMORY_WRITES)
926/** What IEM just wrote. */
927uint8_t g_abIemWrote[256];
928/** How much IEM just wrote. */
929size_t g_cbIemWrote;
930#endif
931
932
933/*********************************************************************************************************************************
934* Internal Functions *
935*********************************************************************************************************************************/
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
940/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
941IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
945IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
948IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
951IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
953#ifdef IEM_WITH_SETJMP
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
959#endif
960
961IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
962IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
976IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
977IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
978
979#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPU pVCpu);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitMtf(PVMCPU pVCpu);
990IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
991IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
992IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value);
993IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPU pVCpu, uint32_t idMsr, uint64_t u64Value);
994#endif
995
996#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
997IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
998IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
999#endif
1000
1001
1002/**
1003 * Sets the pass up status.
1004 *
1005 * @returns VINF_SUCCESS.
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 * @param rcPassUp The pass up status. Must be informational.
1009 * VINF_SUCCESS is not allowed.
1010 */
1011IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1012{
1013 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1014
1015 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1016 if (rcOldPassUp == VINF_SUCCESS)
1017 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1018 /* If both are EM scheduling codes, use EM priority rules. */
1019 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1020 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1021 {
1022 if (rcPassUp < rcOldPassUp)
1023 {
1024 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1025 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1026 }
1027 else
1028 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1029 }
1030 /* Override EM scheduling with specific status code. */
1031 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1032 {
1033 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1034 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1035 }
1036 /* Don't override specific status code, first come first served. */
1037 else
1038 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1039 return VINF_SUCCESS;
1040}
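
/*
 * Typical (hypothetical) call pattern: when a helper succeeds but receives an
 * informational status that EM should eventually see, it records the status
 * here and carries on, letting the priority rules above decide which code is
 * passed up when the instruction completes.  The snippet is a sketch, not a
 * quote from this file.
 *
 *      if (rcStrict != VINF_SUCCESS)   // informational, instruction still succeeded
 *          rc = iemSetPassUpStatus(pVCpu, rcStrict);
 */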
1041
1042
1043/**
1044 * Calculates the CPU mode.
1045 *
1046 * This is mainly for updating IEMCPU::enmCpuMode.
1047 *
1048 * @returns CPU mode.
1049 * @param pVCpu The cross context virtual CPU structure of the
1050 * calling thread.
1051 */
1052DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1053{
1054 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1055 return IEMMODE_64BIT;
1056 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1057 return IEMMODE_32BIT;
1058 return IEMMODE_16BIT;
1059}
1060
1061
1062/**
1063 * Initializes the execution state.
1064 *
1065 * @param pVCpu The cross context virtual CPU structure of the
1066 * calling thread.
1067 * @param fBypassHandlers Whether to bypass access handlers.
1068 *
1069 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1070 * side-effects in strict builds.
1071 */
1072DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1073{
1074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1075 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1076
1077#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1083 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1084 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1085 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1086#endif
1087
1088#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1089 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1090#endif
1091 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1092 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1093#ifdef VBOX_STRICT
1094 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1095 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1096 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1097 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1098 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1099 pVCpu->iem.s.uRexReg = 127;
1100 pVCpu->iem.s.uRexB = 127;
1101 pVCpu->iem.s.offModRm = 127;
1102 pVCpu->iem.s.uRexIndex = 127;
1103 pVCpu->iem.s.iEffSeg = 127;
1104 pVCpu->iem.s.idxPrefix = 127;
1105 pVCpu->iem.s.uVex3rdReg = 127;
1106 pVCpu->iem.s.uVexLength = 127;
1107 pVCpu->iem.s.fEvexStuff = 127;
1108 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1109# ifdef IEM_WITH_CODE_TLB
1110 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1111 pVCpu->iem.s.pbInstrBuf = NULL;
1112 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1113 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1114 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1115 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1116# else
1117 pVCpu->iem.s.offOpcode = 127;
1118 pVCpu->iem.s.cbOpcode = 127;
1119# endif
1120#endif
1121
1122 pVCpu->iem.s.cActiveMappings = 0;
1123 pVCpu->iem.s.iNextMapping = 0;
1124 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1125 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1126#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1127 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1128 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1129 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1130 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1131 if (!pVCpu->iem.s.fInPatchCode)
1132 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1133#endif
1134}
1135
1136#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1137/**
1138 * Performs a minimal reinitialization of the execution state.
1139 *
1140 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1141 * 'world-switch' types of operations on the CPU. Currently only nested
1142 * hardware-virtualization uses it.
1143 *
1144 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1145 */
1146IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1147{
1148 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1149 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1150
1151 pVCpu->iem.s.uCpl = uCpl;
1152 pVCpu->iem.s.enmCpuMode = enmMode;
1153 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1154 pVCpu->iem.s.enmEffAddrMode = enmMode;
1155 if (enmMode != IEMMODE_64BIT)
1156 {
1157 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffOpSize = enmMode;
1159 }
1160 else
1161 {
1162 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1163 pVCpu->iem.s.enmEffOpSize = enmMode;
1164 }
1165 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1166#ifndef IEM_WITH_CODE_TLB
1167 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1168 pVCpu->iem.s.offOpcode = 0;
1169 pVCpu->iem.s.cbOpcode = 0;
1170#endif
1171 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1172}
1173#endif
1174
1175/**
1176 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1177 *
1178 * @param pVCpu The cross context virtual CPU structure of the
1179 * calling thread.
1180 */
1181DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1182{
1183 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1184#ifdef VBOX_STRICT
1185# ifdef IEM_WITH_CODE_TLB
1186 NOREF(pVCpu);
1187# else
1188 pVCpu->iem.s.cbOpcode = 0;
1189# endif
1190#else
1191 NOREF(pVCpu);
1192#endif
1193}
1194
1195
1196/**
1197 * Initializes the decoder state.
1198 *
1199 * iemReInitDecoder is mostly a copy of this function.
1200 *
1201 * @param pVCpu The cross context virtual CPU structure of the
1202 * calling thread.
1203 * @param fBypassHandlers Whether to bypass access handlers.
1204 */
1205DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1206{
1207 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1208 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1209
1210#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1219#endif
1220
1221#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1222 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1223#endif
1224 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1225 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1226 pVCpu->iem.s.enmCpuMode = enmMode;
1227 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1228 pVCpu->iem.s.enmEffAddrMode = enmMode;
1229 if (enmMode != IEMMODE_64BIT)
1230 {
1231 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1232 pVCpu->iem.s.enmEffOpSize = enmMode;
1233 }
1234 else
1235 {
1236 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1237 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1238 }
1239 pVCpu->iem.s.fPrefixes = 0;
1240 pVCpu->iem.s.uRexReg = 0;
1241 pVCpu->iem.s.uRexB = 0;
1242 pVCpu->iem.s.uRexIndex = 0;
1243 pVCpu->iem.s.idxPrefix = 0;
1244 pVCpu->iem.s.uVex3rdReg = 0;
1245 pVCpu->iem.s.uVexLength = 0;
1246 pVCpu->iem.s.fEvexStuff = 0;
1247 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1248#ifdef IEM_WITH_CODE_TLB
1249 pVCpu->iem.s.pbInstrBuf = NULL;
1250 pVCpu->iem.s.offInstrNextByte = 0;
1251 pVCpu->iem.s.offCurInstrStart = 0;
1252# ifdef VBOX_STRICT
1253 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1254 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1255 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1256# endif
1257#else
1258 pVCpu->iem.s.offOpcode = 0;
1259 pVCpu->iem.s.cbOpcode = 0;
1260#endif
1261 pVCpu->iem.s.offModRm = 0;
1262 pVCpu->iem.s.cActiveMappings = 0;
1263 pVCpu->iem.s.iNextMapping = 0;
1264 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1265 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1266#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1267 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1268 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1269 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1270 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1271 if (!pVCpu->iem.s.fInPatchCode)
1272 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1273#endif
1274
1275#ifdef DBGFTRACE_ENABLED
1276 switch (enmMode)
1277 {
1278 case IEMMODE_64BIT:
1279 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1280 break;
1281 case IEMMODE_32BIT:
1282 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1283 break;
1284 case IEMMODE_16BIT:
1285 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1286 break;
1287 }
1288#endif
1289}
1290
1291
1292/**
1293 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1294 *
1295 * This is mostly a copy of iemInitDecoder.
1296 *
1297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1298 */
1299DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1300{
1301 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1302
1303#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1311 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1312#endif
1313
1314 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1315 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1316 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1317 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1318 pVCpu->iem.s.enmEffAddrMode = enmMode;
1319 if (enmMode != IEMMODE_64BIT)
1320 {
1321 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1322 pVCpu->iem.s.enmEffOpSize = enmMode;
1323 }
1324 else
1325 {
1326 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1327 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1328 }
1329 pVCpu->iem.s.fPrefixes = 0;
1330 pVCpu->iem.s.uRexReg = 0;
1331 pVCpu->iem.s.uRexB = 0;
1332 pVCpu->iem.s.uRexIndex = 0;
1333 pVCpu->iem.s.idxPrefix = 0;
1334 pVCpu->iem.s.uVex3rdReg = 0;
1335 pVCpu->iem.s.uVexLength = 0;
1336 pVCpu->iem.s.fEvexStuff = 0;
1337 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1338#ifdef IEM_WITH_CODE_TLB
1339 if (pVCpu->iem.s.pbInstrBuf)
1340 {
1341 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1342 - pVCpu->iem.s.uInstrBufPc;
1343 if (off < pVCpu->iem.s.cbInstrBufTotal)
1344 {
1345 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1346 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1347 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1348 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1349 else
1350 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1351 }
1352 else
1353 {
1354 pVCpu->iem.s.pbInstrBuf = NULL;
1355 pVCpu->iem.s.offInstrNextByte = 0;
1356 pVCpu->iem.s.offCurInstrStart = 0;
1357 pVCpu->iem.s.cbInstrBuf = 0;
1358 pVCpu->iem.s.cbInstrBufTotal = 0;
1359 }
1360 }
1361 else
1362 {
1363 pVCpu->iem.s.offInstrNextByte = 0;
1364 pVCpu->iem.s.offCurInstrStart = 0;
1365 pVCpu->iem.s.cbInstrBuf = 0;
1366 pVCpu->iem.s.cbInstrBufTotal = 0;
1367 }
1368#else
1369 pVCpu->iem.s.cbOpcode = 0;
1370 pVCpu->iem.s.offOpcode = 0;
1371#endif
1372 pVCpu->iem.s.offModRm = 0;
1373 Assert(pVCpu->iem.s.cActiveMappings == 0);
1374 pVCpu->iem.s.iNextMapping = 0;
1375 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1376 Assert(pVCpu->iem.s.fBypassHandlers == false);
1377#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1378 if (!pVCpu->iem.s.fInPatchCode)
1379 { /* likely */ }
1380 else
1381 {
1382 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1383 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1384 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1385 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1386 if (!pVCpu->iem.s.fInPatchCode)
1387 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1388 }
1389#endif
1390
1391#ifdef DBGFTRACE_ENABLED
1392 switch (enmMode)
1393 {
1394 case IEMMODE_64BIT:
1395 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1396 break;
1397 case IEMMODE_32BIT:
1398 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1399 break;
1400 case IEMMODE_16BIT:
1401 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1402 break;
1403 }
1404#endif
1405}
1406
1407
1408
1409/**
1410 * Prefetches opcodes the first time, when execution is started.
1411 *
1412 * @returns Strict VBox status code.
1413 * @param pVCpu The cross context virtual CPU structure of the
1414 * calling thread.
1415 * @param fBypassHandlers Whether to bypass access handlers.
1416 */
1417IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1418{
1419 iemInitDecoder(pVCpu, fBypassHandlers);
1420
1421#ifdef IEM_WITH_CODE_TLB
1422 /** @todo Do ITLB lookup here. */
1423
1424#else /* !IEM_WITH_CODE_TLB */
1425
1426 /*
1427 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1428 *
1429 * First translate CS:rIP to a physical address.
1430 */
1431 uint32_t cbToTryRead;
1432 RTGCPTR GCPtrPC;
1433 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1434 {
1435 cbToTryRead = PAGE_SIZE;
1436 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1437 if (IEM_IS_CANONICAL(GCPtrPC))
1438 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1439 else
1440 return iemRaiseGeneralProtectionFault0(pVCpu);
1441 }
1442 else
1443 {
1444 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1445 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1446 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1447 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1448 else
1449 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1450 if (cbToTryRead) { /* likely */ }
1451 else /* overflowed */
1452 {
1453 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1454 cbToTryRead = UINT32_MAX;
1455 }
1456 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1457 Assert(GCPtrPC <= UINT32_MAX);
1458 }
1459
1460# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1461 /* Allow interpretation of patch manager code blocks since they can for
1462 instance throw #PFs for perfectly good reasons. */
1463 if (pVCpu->iem.s.fInPatchCode)
1464 {
1465 size_t cbRead = 0;
1466 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1467 AssertRCReturn(rc, rc);
1468 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1469 return VINF_SUCCESS;
1470 }
1471# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1472
1473 RTGCPHYS GCPhys;
1474 uint64_t fFlags;
1475 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1476 if (RT_SUCCESS(rc)) { /* probable */ }
1477 else
1478 {
1479 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1480 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1481 }
1482 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1483 else
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1486 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1487 }
1488 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1489 else
1490 {
1491 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1492 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1493 }
1494 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1495 /** @todo Check reserved bits and such stuff. PGM is better at doing
1496 * that, so do it when implementing the guest virtual address
1497 * TLB... */
1498
1499 /*
1500 * Read the bytes at this address.
1501 */
1502 PVM pVM = pVCpu->CTX_SUFF(pVM);
1503# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1504 size_t cbActual;
1505 if ( PATMIsEnabled(pVM)
1506 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1507 {
1508 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1509 Assert(cbActual > 0);
1510 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1511 }
1512 else
1513# endif
1514 {
1515 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1516 if (cbToTryRead > cbLeftOnPage)
1517 cbToTryRead = cbLeftOnPage;
1518 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1519 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1520
1521 if (!pVCpu->iem.s.fBypassHandlers)
1522 {
1523 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1524 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1525 { /* likely */ }
1526 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1527 {
1528 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1529 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1530 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1531 }
1532 else
1533 {
1534 Log((RT_SUCCESS(rcStrict)
1535 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1536 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1537 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1538 return rcStrict;
1539 }
1540 }
1541 else
1542 {
1543 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1544 if (RT_SUCCESS(rc))
1545 { /* likely */ }
1546 else
1547 {
1548 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1549 GCPtrPC, GCPhys, cbToTryRead, rc));
1550 return rc;
1551 }
1552 }
1553 pVCpu->iem.s.cbOpcode = cbToTryRead;
1554 }
1555#endif /* !IEM_WITH_CODE_TLB */
1556 return VINF_SUCCESS;
1557}
1558
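/*
 * Illustrative sketch (hypothetical helper, compiled out): the 16/32-bit
 * prefetch size computed above is simply the smallest of three quantities -
 * bytes left in CS (the limit is inclusive, hence the +1), bytes left on the
 * current guest page, and the opcode buffer size.  The helper below restates
 * that arithmetic with standalone parameters; the real code works directly
 * on pVCpu->cpum.GstCtx and pVCpu->iem.s.
 */
#if 0
static uint32_t iemExampleCalcPrefetchLen(uint64_t uCsBase, uint32_t uEip, uint32_t cbCsLimit,
                                          uint32_t cbOpcodeBuf, uint64_t *pGCPtrPC)
{
    /* Bytes left in the code segment; wraps to zero only for EIP=0 with a
       4 GiB limit, which the code above treats as "no limit". */
    uint32_t cbToTryRead = cbCsLimit - uEip + 1;
    if (!cbToTryRead)
        cbToTryRead = UINT32_MAX;

    /* Linear address of the instruction pointer. */
    uint64_t const GCPtrPC = (uint32_t)uCsBase + uEip;
    *pGCPtrPC = GCPtrPC;

    /* Never read past the end of the current page... */
    uint32_t const cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;

    /* ...nor more than the opcode buffer can hold. */
    if (cbToTryRead > cbOpcodeBuf)
        cbToTryRead = cbOpcodeBuf;
    return cbToTryRead;
}
#endif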
1559
1560/**
1561 * Invalidates the IEM TLBs.
1562 *
1563 * This is called internally as well as by PGM when moving GC mappings.
1564 *
1566 * @param pVCpu The cross context virtual CPU structure of the calling
1567 * thread.
1568 * @param fVmm Set when PGM calls us with a remapping.
1569 */
1570VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1571{
1572#ifdef IEM_WITH_CODE_TLB
1573 pVCpu->iem.s.cbInstrBufTotal = 0;
1574 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1575 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1576 { /* very likely */ }
1577 else
1578 {
1579 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1580 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1581 while (i-- > 0)
1582 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1583 }
1584#endif
1585
1586#ifdef IEM_WITH_DATA_TLB
1587 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1588 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1589 { /* very likely */ }
1590 else
1591 {
1592 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1593 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1594 while (i-- > 0)
1595 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1596 }
1597#endif
1598 NOREF(pVCpu); NOREF(fVmm);
1599}
1600
1601
1602/**
1603 * Invalidates a page in the TLBs.
1604 *
1605 * @param pVCpu The cross context virtual CPU structure of the calling
1606 * thread.
1607 * @param GCPtr The address of the page to invalidate.
1608 */
1609VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1610{
1611#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1612 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1613 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1614 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1615 uintptr_t idx = (uint8_t)GCPtr;
1616
1617# ifdef IEM_WITH_CODE_TLB
1618 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1619 {
1620 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1621 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1622 pVCpu->iem.s.cbInstrBufTotal = 0;
1623 }
1624# endif
1625
1626# ifdef IEM_WITH_DATA_TLB
1627 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1628 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1629# endif
1630#else
1631 NOREF(pVCpu); NOREF(GCPtr);
1632#endif
1633}
1634
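/*
 * Illustrative sketch (hypothetical helper, compiled out): both IEM TLBs are
 * direct mapped with 256 entries.  A linear address selects an entry by the
 * low 8 bits of its guest page number, and the stored tag is that page number
 * ORed with the current TLB revision (the revision bits sit well above the
 * index bits, see IEMTLB_REVISION_INCR).  That is why IEMTlbInvalidateAll can
 * invalidate everything by just bumping the revision and only needs to clear
 * the tags when the revision counter wraps back to zero.
 */
#if 0
static void iemExampleTlbTagAndIndex(RTGCPTR GCPtr, uint64_t uTlbRevision,
                                     uint64_t *puTag, uintptr_t *pidx)
{
    uint64_t const uPageNo = GCPtr >> X86_PAGE_SHIFT; /* strip the page offset */
    *puTag = uPageNo | uTlbRevision;                  /* what aEntries[idx].uTag must equal for a hit */
    *pidx  = (uint8_t)uPageNo;                        /* direct mapped: low 8 bits pick the entry */
}
#endif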
1635
1636/**
1637 * Invalidates the host physical aspects of the IEM TLBs.
1638 *
1639 * This is called internally as well as by PGM when moving GC mappings.
1640 *
1641 * @param pVCpu The cross context virtual CPU structure of the calling
1642 * thread.
1643 */
1644VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1645{
1646#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1647 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1648
1649# ifdef IEM_WITH_CODE_TLB
1650 pVCpu->iem.s.cbInstrBufTotal = 0;
1651# endif
1652 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1653 if (uTlbPhysRev != 0)
1654 {
1655 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1656 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1657 }
1658 else
1659 {
1660 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1661 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1662
1663 unsigned i;
1664# ifdef IEM_WITH_CODE_TLB
1665 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1666 while (i-- > 0)
1667 {
1668 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1669 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1670 }
1671# endif
1672# ifdef IEM_WITH_DATA_TLB
1673 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1674 while (i-- > 0)
1675 {
1676 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1677 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1678 }
1679# endif
1680 }
1681#else
1682 NOREF(pVCpu);
1683#endif
1684}
1685
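/*
 * Illustrative sketch (hypothetical helper, compiled out): each TLB entry
 * packs its per-page flag bits and the physical revision into the single
 * fFlagsAndPhysRev field.  The cached host mapping data is only trusted while
 * the revision bits match the TLB's current uTlbPhysRev, which is what
 * IEMTlbInvalidateAllPhysical above bumps (clearing the per-entry bits only
 * on wrap-around).
 */
#if 0
DECLINLINE(bool) iemExampleIsPhysMappingCurrent(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
{
    return (fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev;
}
#endif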
1686
1687/**
1688 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1689 *
1690 * This is called internally as well as by PGM when moving GC mappings.
1691 *
1692 * @param pVM The cross context VM structure.
1693 *
1694 * @remarks Caller holds the PGM lock.
1695 */
1696VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1697{
1698 RT_NOREF_PV(pVM);
1699}
1700
1701#ifdef IEM_WITH_CODE_TLB
1702
1703/**
1704 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1705 * failure and longjmping.
1706 *
1707 * We end up here for a number of reasons:
1708 * - pbInstrBuf isn't yet initialized.
1709 * - Advancing beyond the buffer boundary (e.g. cross page).
1710 * - Advancing beyond the CS segment limit.
1711 * - Fetching from non-mappable page (e.g. MMIO).
1712 *
1713 * @param pVCpu The cross context virtual CPU structure of the
1714 * calling thread.
1715 * @param pvDst Where to return the bytes.
1716 * @param cbDst Number of bytes to read.
1717 *
1718 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1719 */
1720IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1721{
1722#ifdef IN_RING3
1723 for (;;)
1724 {
1725 Assert(cbDst <= 8);
1726 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1727
1728 /*
1729 * We might have a partial buffer match, deal with that first to make the
1730 * rest simpler. This is the first part of the cross page/buffer case.
1731 */
1732 if (pVCpu->iem.s.pbInstrBuf != NULL)
1733 {
1734 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1735 {
1736 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1737 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1738 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1739
1740 cbDst -= cbCopy;
1741 pvDst = (uint8_t *)pvDst + cbCopy;
1742 offBuf += cbCopy;
1743 pVCpu->iem.s.offInstrNextByte = offBuf;
1744 }
1745 }
1746
1747 /*
1748 * Check segment limit, figuring how much we're allowed to access at this point.
1749 *
1750 * We will fault immediately if RIP is past the segment limit / in non-canonical
1751 * territory. If we do continue, there are one or more bytes to read before we
1752 * end up in trouble and we need to do that first before faulting.
1753 */
1754 RTGCPTR GCPtrFirst;
1755 uint32_t cbMaxRead;
1756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1757 {
1758 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1759 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1760 { /* likely */ }
1761 else
1762 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1763 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1764 }
1765 else
1766 {
1767 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1768 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1769 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1770 { /* likely */ }
1771 else
1772 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1773 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1774 if (cbMaxRead != 0)
1775 { /* likely */ }
1776 else
1777 {
1778 /* Overflowed because address is 0 and limit is max. */
1779 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1780 cbMaxRead = X86_PAGE_SIZE;
1781 }
1782 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1783 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1784 if (cbMaxRead2 < cbMaxRead)
1785 cbMaxRead = cbMaxRead2;
1786 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1787 }
1788
1789 /*
1790 * Get the TLB entry for this piece of code.
1791 */
1792 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1793 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1794 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1795 if (pTlbe->uTag == uTag)
1796 {
1797 /* likely when executing lots of code, otherwise unlikely */
1798# ifdef VBOX_WITH_STATISTICS
1799 pVCpu->iem.s.CodeTlb.cTlbHits++;
1800# endif
1801 }
1802 else
1803 {
1804 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1805# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1806 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1807 {
1808 pTlbe->uTag = uTag;
1809 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1810 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1811 pTlbe->GCPhys = NIL_RTGCPHYS;
1812 pTlbe->pbMappingR3 = NULL;
1813 }
1814 else
1815# endif
1816 {
1817 RTGCPHYS GCPhys;
1818 uint64_t fFlags;
1819 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1820 if (RT_FAILURE(rc))
1821 {
1822 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1823 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1824 }
1825
1826 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1827 pTlbe->uTag = uTag;
1828 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1829 pTlbe->GCPhys = GCPhys;
1830 pTlbe->pbMappingR3 = NULL;
1831 }
1832 }
1833
1834 /*
1835 * Check TLB page table level access flags.
1836 */
1837 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1838 {
1839 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1840 {
1841 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1842 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1843 }
1844 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1845 {
1846 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1847 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1848 }
1849 }
1850
1851# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1852 /*
1853 * Allow interpretation of patch manager code blocks since they can for
1854 * instance throw #PFs for perfectly good reasons.
1855 */
1856 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1857 { /* likely */ }
1858 else
1859 {
1860 /** @todo Could optimize this a little in ring-3 if we liked. */
1861 size_t cbRead = 0;
1862 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1863 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1864 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1865 return;
1866 }
1867# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1868
1869 /*
1870 * Look up the physical page info if necessary.
1871 */
1872 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1873 { /* not necessary */ }
1874 else
1875 {
1876 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1877 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1878 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1879 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1880 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1881 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1882 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1883 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1884 }
1885
1886# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1887 /*
1888 * Try do a direct read using the pbMappingR3 pointer.
1889 */
1890 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1891 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1892 {
1893 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1894 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1895 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1896 {
1897 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1898 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1899 }
1900 else
1901 {
1902 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1903 Assert(cbInstr < cbMaxRead);
1904 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1905 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1906 }
1907 if (cbDst <= cbMaxRead)
1908 {
1909 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1910 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1911 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1912 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1913 return;
1914 }
1915 pVCpu->iem.s.pbInstrBuf = NULL;
1916
1917 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1918 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1919 }
1920 else
1921# endif
1922#if 0
1923 /*
1924 * If there is no special read handling, we can read a bit more and
1925 * put it in the prefetch buffer.
1926 */
1927 if ( cbDst < cbMaxRead
1928 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1929 {
1930 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1931 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1932 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1933 { /* likely */ }
1934 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1935 {
1936 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1937 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1938 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1939 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1940 }
1941 else
1942 {
1943 Log((RT_SUCCESS(rcStrict)
1944 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1945 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1946 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1947 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1948 }
1949 }
1950 /*
1951 * Special read handling, so only read exactly what's needed.
1952 * This is a highly unlikely scenario.
1953 */
1954 else
1955#endif
1956 {
1957 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1958 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1959 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1960 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1961 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1962 { /* likely */ }
1963 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1964 {
1965 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1966 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1967 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1968 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1969 }
1970 else
1971 {
1972 Log((RT_SUCCESS(rcStrict)
1973 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1974 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1975 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1976 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1977 }
1978 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1979 if (cbToRead == cbDst)
1980 return;
1981 }
1982
1983 /*
1984 * More to read, loop.
1985 */
1986 cbDst -= cbMaxRead;
1987 pvDst = (uint8_t *)pvDst + cbMaxRead;
1988 }
1989#else
1990 RT_NOREF(pvDst, cbDst);
1991 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1992#endif
1993}
1994
1995#else
1996
1997/**
1998 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1999 * exception if it fails.
2000 *
2001 * @returns Strict VBox status code.
2002 * @param pVCpu The cross context virtual CPU structure of the
2003 * calling thread.
2004 * @param cbMin The minimum number of bytes relative to offOpcode
2005 * that must be read.
2006 */
2007IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2008{
2009 /*
2010 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2011 *
2012 * First translate CS:rIP to a physical address.
2013 */
2014 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2015 uint32_t cbToTryRead;
2016 RTGCPTR GCPtrNext;
2017 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2018 {
2019 cbToTryRead = PAGE_SIZE;
2020 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2021 if (!IEM_IS_CANONICAL(GCPtrNext))
2022 return iemRaiseGeneralProtectionFault0(pVCpu);
2023 }
2024 else
2025 {
2026 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2027 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2028 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2029 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2030 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2031 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2032 if (!cbToTryRead) /* overflowed */
2033 {
2034 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2035 cbToTryRead = UINT32_MAX;
2036 /** @todo check out wrapping around the code segment. */
2037 }
2038 if (cbToTryRead < cbMin - cbLeft)
2039 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2040 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2041 }
2042
2043 /* Only read up to the end of the page, and make sure we don't read more
2044 than the opcode buffer can hold. */
2045 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2046 if (cbToTryRead > cbLeftOnPage)
2047 cbToTryRead = cbLeftOnPage;
2048 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2049 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2050/** @todo r=bird: Convert assertion into undefined opcode exception? */
2051 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2052
2053# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2054 /* Allow interpretation of patch manager code blocks since they can for
2055 instance throw #PFs for perfectly good reasons. */
2056 if (pVCpu->iem.s.fInPatchCode)
2057 {
2058 size_t cbRead = 0;
2059 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2060 AssertRCReturn(rc, rc);
2061 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2062 return VINF_SUCCESS;
2063 }
2064# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2065
2066 RTGCPHYS GCPhys;
2067 uint64_t fFlags;
2068 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2069 if (RT_FAILURE(rc))
2070 {
2071 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2072 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2073 }
2074 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2075 {
2076 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2077 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2078 }
2079 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2080 {
2081 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2082 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2083 }
2084 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2085 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2086 /** @todo Check reserved bits and such stuff. PGM is better at doing
2087 * that, so do it when implementing the guest virtual address
2088 * TLB... */
2089
2090 /*
2091 * Read the bytes at this address.
2092 *
2093 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2094 * and since PATM should only patch the start of an instruction there
2095 * should be no need to check again here.
2096 */
2097 if (!pVCpu->iem.s.fBypassHandlers)
2098 {
2099 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2100 cbToTryRead, PGMACCESSORIGIN_IEM);
2101 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2102 { /* likely */ }
2103 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2104 {
2105 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2106 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2107 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2108 }
2109 else
2110 {
2111 Log((RT_SUCCESS(rcStrict)
2112 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2113 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2114 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2115 return rcStrict;
2116 }
2117 }
2118 else
2119 {
2120 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2121 if (RT_SUCCESS(rc))
2122 { /* likely */ }
2123 else
2124 {
2125 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2126 return rc;
2127 }
2128 }
2129 pVCpu->iem.s.cbOpcode += cbToTryRead;
2130 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2131
2132 return VINF_SUCCESS;
2133}
2134
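/*
 * Illustrative sketch (hypothetical helper, compiled out): the inclusive
 * segment-limit arithmetic used above.  For uAddr <= u32Limit the expression
 * u32Limit - uAddr + 1 only wraps to zero when uAddr is 0 and the limit is
 * UINT32_MAX, i.e. a flat 4 GiB segment, which is exactly the case the code
 * above asserts and substitutes UINT32_MAX for.
 */
#if 0
static uint32_t iemExampleBytesLeftInSeg(uint32_t uAddr, uint32_t u32Limit)
{
    uint32_t cbLeft = u32Limit - uAddr + 1;
    return cbLeft ? cbLeft : UINT32_MAX;
}
#endif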
2135#endif /* !IEM_WITH_CODE_TLB */
2136#ifndef IEM_WITH_SETJMP
2137
2138/**
2139 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2140 *
2141 * @returns Strict VBox status code.
2142 * @param pVCpu The cross context virtual CPU structure of the
2143 * calling thread.
2144 * @param pb Where to return the opcode byte.
2145 */
2146DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2147{
2148 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2149 if (rcStrict == VINF_SUCCESS)
2150 {
2151 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2152 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2153 pVCpu->iem.s.offOpcode = offOpcode + 1;
2154 }
2155 else
2156 *pb = 0;
2157 return rcStrict;
2158}
2159
2160
2161/**
2162 * Fetches the next opcode byte.
2163 *
2164 * @returns Strict VBox status code.
2165 * @param pVCpu The cross context virtual CPU structure of the
2166 * calling thread.
2167 * @param pu8 Where to return the opcode byte.
2168 */
2169DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2170{
2171 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2172 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2173 {
2174 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2175 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2176 return VINF_SUCCESS;
2177 }
2178 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2179}
2180
2181#else /* IEM_WITH_SETJMP */
2182
2183/**
2184 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2185 *
2186 * @returns The opcode byte.
2187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2188 */
2189DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2190{
2191# ifdef IEM_WITH_CODE_TLB
2192 uint8_t u8;
2193 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2194 return u8;
2195# else
2196 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2197 if (rcStrict == VINF_SUCCESS)
2198 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2199 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2200# endif
2201}
2202
2203
2204/**
2205 * Fetches the next opcode byte, longjmp on error.
2206 *
2207 * @returns The opcode byte.
2208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2209 */
2210DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2211{
2212# ifdef IEM_WITH_CODE_TLB
2213 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2214 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2215 if (RT_LIKELY( pbBuf != NULL
2216 && offBuf < pVCpu->iem.s.cbInstrBuf))
2217 {
2218 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2219 return pbBuf[offBuf];
2220 }
2221# else
2222 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2223 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2224 {
2225 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2226 return pVCpu->iem.s.abOpcode[offOpcode];
2227 }
2228# endif
2229 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2230}
2231
2232#endif /* IEM_WITH_SETJMP */
2233
2234/**
2235 * Fetches the next opcode byte, returns automatically on failure.
2236 *
2237 * @param a_pu8 Where to return the opcode byte.
2238 * @remark Implicitly references pVCpu.
2239 */
2240#ifndef IEM_WITH_SETJMP
2241# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2242 do \
2243 { \
2244 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2245 if (rcStrict2 == VINF_SUCCESS) \
2246 { /* likely */ } \
2247 else \
2248 return rcStrict2; \
2249 } while (0)
2250#else
2251# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2252#endif /* IEM_WITH_SETJMP */
2253
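/*
 * Illustrative sketch (hypothetical decoder fragment, compiled out): the
 * macro above expects to be used inside a function that returns VBOXSTRICTRC
 * and has pVCpu in scope, since the non-setjmp expansion bails out with
 * 'return rcStrict2' on a failed fetch while the setjmp expansion simply
 * assigns the byte (longjmping on failure deeper down).
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeImm8(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm); /* returns or longjmps on fetch failure */
    Log4(("iemExampleDecodeImm8: imm8=%#x\n", bImm));
    return VINF_SUCCESS;
}
#endif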
2254
2255#ifndef IEM_WITH_SETJMP
2256/**
2257 * Fetches the next signed byte from the opcode stream.
2258 *
2259 * @returns Strict VBox status code.
2260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2261 * @param pi8 Where to return the signed byte.
2262 */
2263DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2264{
2265 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2266}
2267#endif /* !IEM_WITH_SETJMP */
2268
2269
2270/**
2271 * Fetches the next signed byte from the opcode stream, returning automatically
2272 * on failure.
2273 *
2274 * @param a_pi8 Where to return the signed byte.
2275 * @remark Implicitly references pVCpu.
2276 */
2277#ifndef IEM_WITH_SETJMP
2278# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2279 do \
2280 { \
2281 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2282 if (rcStrict2 != VINF_SUCCESS) \
2283 return rcStrict2; \
2284 } while (0)
2285#else /* IEM_WITH_SETJMP */
2286# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2287
2288#endif /* IEM_WITH_SETJMP */
2289
2290#ifndef IEM_WITH_SETJMP
2291
2292/**
2293 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2294 *
2295 * @returns Strict VBox status code.
2296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2297 * @param pu16 Where to return the opcode word.
2298 */
2299DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2300{
2301 uint8_t u8;
2302 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2303 if (rcStrict == VINF_SUCCESS)
2304 *pu16 = (int8_t)u8;
2305 return rcStrict;
2306}
2307
2308
2309/**
2310 * Fetches the next signed byte from the opcode stream, extending it to
2311 * unsigned 16-bit.
2312 *
2313 * @returns Strict VBox status code.
2314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2315 * @param pu16 Where to return the unsigned word.
2316 */
2317DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2318{
2319 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2320 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2321 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2322
2323 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2324 pVCpu->iem.s.offOpcode = offOpcode + 1;
2325 return VINF_SUCCESS;
2326}
2327
2328#endif /* !IEM_WITH_SETJMP */
2329
2330/**
2331 * Fetches the next signed byte from the opcode stream and sign-extends it to
2332 * a word, returning automatically on failure.
2333 *
2334 * @param a_pu16 Where to return the word.
2335 * @remark Implicitly references pVCpu.
2336 */
2337#ifndef IEM_WITH_SETJMP
2338# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2339 do \
2340 { \
2341 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2342 if (rcStrict2 != VINF_SUCCESS) \
2343 return rcStrict2; \
2344 } while (0)
2345#else
2346# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2347#endif
2348
2349#ifndef IEM_WITH_SETJMP
2350
2351/**
2352 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2353 *
2354 * @returns Strict VBox status code.
2355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2356 * @param pu32 Where to return the opcode dword.
2357 */
2358DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2359{
2360 uint8_t u8;
2361 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2362 if (rcStrict == VINF_SUCCESS)
2363 *pu32 = (int8_t)u8;
2364 return rcStrict;
2365}
2366
2367
2368/**
2369 * Fetches the next signed byte from the opcode stream, extending it to
2370 * unsigned 32-bit.
2371 *
2372 * @returns Strict VBox status code.
2373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2374 * @param pu32 Where to return the unsigned dword.
2375 */
2376DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2377{
2378 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2379 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2380 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2381
2382 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2383 pVCpu->iem.s.offOpcode = offOpcode + 1;
2384 return VINF_SUCCESS;
2385}
2386
2387#endif /* !IEM_WITH_SETJMP */
2388
2389/**
2390 * Fetches the next signed byte from the opcode stream and sign-extends it to
2391 * a double word, returning automatically on failure.
2392 *
2393 * @param a_pu32 Where to return the double word.
2394 * @remark Implicitly references pVCpu.
2395 */
2396#ifndef IEM_WITH_SETJMP
2397# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2398 do \
2399 { \
2400 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2401 if (rcStrict2 != VINF_SUCCESS) \
2402 return rcStrict2; \
2403 } while (0)
2404#else
2405# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2406#endif
2407
2408#ifndef IEM_WITH_SETJMP
2409
2410/**
2411 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2412 *
2413 * @returns Strict VBox status code.
2414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2415 * @param pu64 Where to return the opcode qword.
2416 */
2417DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2418{
2419 uint8_t u8;
2420 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2421 if (rcStrict == VINF_SUCCESS)
2422 *pu64 = (int8_t)u8;
2423 return rcStrict;
2424}
2425
2426
2427/**
2428 * Fetches the next signed byte from the opcode stream, extending it to
2429 * unsigned 64-bit.
2430 *
2431 * @returns Strict VBox status code.
2432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2433 * @param pu64 Where to return the unsigned qword.
2434 */
2435DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2436{
2437 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2438 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2439 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2440
2441 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2442 pVCpu->iem.s.offOpcode = offOpcode + 1;
2443 return VINF_SUCCESS;
2444}
2445
2446#endif /* !IEM_WITH_SETJMP */
2447
2448
2449/**
2450 * Fetches the next signed byte from the opcode stream and sign-extends it to
2451 * a quad word, returning automatically on failure.
2452 *
2453 * @param a_pu64 Where to return the quad word.
2454 * @remark Implicitly references pVCpu.
2455 */
2456#ifndef IEM_WITH_SETJMP
2457# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2458 do \
2459 { \
2460 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2461 if (rcStrict2 != VINF_SUCCESS) \
2462 return rcStrict2; \
2463 } while (0)
2464#else
2465# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2466#endif
2467
2468
2469#ifndef IEM_WITH_SETJMP
2470/**
2471 * Fetches the next ModR/M opcode byte, noting down its offset.
2472 *
2473 * @returns Strict VBox status code.
2474 * @param pVCpu The cross context virtual CPU structure of the
2475 * calling thread.
2476 * @param pu8 Where to return the opcode byte.
2477 */
2478DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2479{
2480 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2481 pVCpu->iem.s.offModRm = offOpcode;
2482 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2483 {
2484 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2485 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2486 return VINF_SUCCESS;
2487 }
2488 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2489}
2490#else /* IEM_WITH_SETJMP */
2491/**
2492 * Fetches the next ModR/M opcode byte, noting down its offset, longjmp on error.
2493 *
2494 * @returns The opcode byte.
2495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2496 */
2497DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2498{
2499# ifdef IEM_WITH_CODE_TLB
2500 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2501 pVCpu->iem.s.offModRm = offBuf;
2502 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2503 if (RT_LIKELY( pbBuf != NULL
2504 && offBuf < pVCpu->iem.s.cbInstrBuf))
2505 {
2506 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2507 return pbBuf[offBuf];
2508 }
2509# else
2510 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2511 pVCpu->iem.s.offModRm = offOpcode;
2512 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2513 {
2514 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2515 return pVCpu->iem.s.abOpcode[offOpcode];
2516 }
2517# endif
2518 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2519}
2520#endif /* IEM_WITH_SETJMP */
2521
2522/**
2523 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2524 * on failure.
2525 *
2526 * Will note down the position of the ModR/M byte for VT-x exits.
2527 *
2528 * @param a_pbRm Where to return the RM opcode byte.
2529 * @remark Implicitly references pVCpu.
2530 */
2531#ifndef IEM_WITH_SETJMP
2532# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2533 do \
2534 { \
2535 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2536 if (rcStrict2 == VINF_SUCCESS) \
2537 { /* likely */ } \
2538 else \
2539 return rcStrict2; \
2540 } while (0)
2541#else
2542# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2543#endif /* IEM_WITH_SETJMP */
2544
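/*
 * Illustrative sketch (hypothetical helper, compiled out): the byte fetched
 * by IEM_OPCODE_GET_NEXT_RM is a ModR/M byte, i.e. three packed fields -
 * mod (bits 7:6), reg/opcode extension (bits 5:3) and r/m (bits 2:0).
 */
#if 0
DECLINLINE(void) iemExampleSplitModRm(uint8_t bRm, uint8_t *pMod, uint8_t *pReg, uint8_t *pRm)
{
    *pMod = bRm >> 6;       /* 3 = register operand, 0..2 = memory addressing forms */
    *pReg = (bRm >> 3) & 7; /* register number or opcode extension */
    *pRm  = bRm & 7;        /* register or base encoding of the memory operand */
}
#endif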
2545
2546#ifndef IEM_WITH_SETJMP
2547
2548/**
2549 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2550 *
2551 * @returns Strict VBox status code.
2552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2553 * @param pu16 Where to return the opcode word.
2554 */
2555DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2556{
2557 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2558 if (rcStrict == VINF_SUCCESS)
2559 {
2560 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2561# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2562 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2563# else
2564 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2565# endif
2566 pVCpu->iem.s.offOpcode = offOpcode + 2;
2567 }
2568 else
2569 *pu16 = 0;
2570 return rcStrict;
2571}
2572
2573
2574/**
2575 * Fetches the next opcode word.
2576 *
2577 * @returns Strict VBox status code.
2578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2579 * @param pu16 Where to return the opcode word.
2580 */
2581DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2582{
2583 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2584 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2585 {
2586 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2587# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2588 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2589# else
2590 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2591# endif
2592 return VINF_SUCCESS;
2593 }
2594 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2595}
2596
2597#else /* IEM_WITH_SETJMP */
2598
2599/**
2600 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2601 *
2602 * @returns The opcode word.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 */
2605DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2606{
2607# ifdef IEM_WITH_CODE_TLB
2608 uint16_t u16;
2609 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2610 return u16;
2611# else
2612 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2613 if (rcStrict == VINF_SUCCESS)
2614 {
2615 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2616 pVCpu->iem.s.offOpcode += 2;
2617# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2618 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2619# else
2620 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2621# endif
2622 }
2623 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2624# endif
2625}
2626
2627
2628/**
2629 * Fetches the next opcode word, longjmp on error.
2630 *
2631 * @returns The opcode word.
2632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2633 */
2634DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2635{
2636# ifdef IEM_WITH_CODE_TLB
2637 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2638 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2639 if (RT_LIKELY( pbBuf != NULL
2640 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2641 {
2642 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2643# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2644 return *(uint16_t const *)&pbBuf[offBuf];
2645# else
2646 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2647# endif
2648 }
2649# else
2650 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2651 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2652 {
2653 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2656# else
2657 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2658# endif
2659 }
2660# endif
2661 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2662}
2663
2664#endif /* IEM_WITH_SETJMP */
2665
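/*
 * Illustrative sketch (hypothetical helper, compiled out): the RT_MAKE_U16
 * path above assembles the immediate byte by byte in x86 little-endian order,
 * with the first opcode byte as the least significant one - the same value
 * the unaligned 16-bit load in the other branch yields on a little-endian
 * host.
 */
#if 0
static uint16_t iemExampleMakeU16(uint8_t const *pab)
{
    return (uint16_t)(pab[0] | ((uint16_t)pab[1] << 8));
}
#endif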
2666
2667/**
2668 * Fetches the next opcode word, returns automatically on failure.
2669 *
2670 * @param a_pu16 Where to return the opcode word.
2671 * @remark Implicitly references pVCpu.
2672 */
2673#ifndef IEM_WITH_SETJMP
2674# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2675 do \
2676 { \
2677 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2678 if (rcStrict2 != VINF_SUCCESS) \
2679 return rcStrict2; \
2680 } while (0)
2681#else
2682# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2683#endif
2684
2685#ifndef IEM_WITH_SETJMP
2686
2687/**
2688 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2689 *
2690 * @returns Strict VBox status code.
2691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2692 * @param pu32 Where to return the opcode double word.
2693 */
2694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2695{
2696 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2697 if (rcStrict == VINF_SUCCESS)
2698 {
2699 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2700 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2701 pVCpu->iem.s.offOpcode = offOpcode + 2;
2702 }
2703 else
2704 *pu32 = 0;
2705 return rcStrict;
2706}
2707
2708
2709/**
2710 * Fetches the next opcode word, zero extending it to a double word.
2711 *
2712 * @returns Strict VBox status code.
2713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2714 * @param pu32 Where to return the opcode double word.
2715 */
2716DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2717{
2718 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2719 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2720 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2721
2722 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2723 pVCpu->iem.s.offOpcode = offOpcode + 2;
2724 return VINF_SUCCESS;
2725}
2726
2727#endif /* !IEM_WITH_SETJMP */
2728
2729
2730/**
2731 * Fetches the next opcode word and zero extends it to a double word, returns
2732 * automatically on failure.
2733 *
2734 * @param a_pu32 Where to return the opcode double word.
2735 * @remark Implicitly references pVCpu.
2736 */
2737#ifndef IEM_WITH_SETJMP
2738# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2739 do \
2740 { \
2741 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2742 if (rcStrict2 != VINF_SUCCESS) \
2743 return rcStrict2; \
2744 } while (0)
2745#else
2746# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2747#endif
2748
2749#ifndef IEM_WITH_SETJMP
2750
2751/**
2752 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2753 *
2754 * @returns Strict VBox status code.
2755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2756 * @param pu64 Where to return the opcode quad word.
2757 */
2758DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2759{
2760 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2761 if (rcStrict == VINF_SUCCESS)
2762 {
2763 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2764 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2765 pVCpu->iem.s.offOpcode = offOpcode + 2;
2766 }
2767 else
2768 *pu64 = 0;
2769 return rcStrict;
2770}
2771
2772
2773/**
2774 * Fetches the next opcode word, zero extending it to a quad word.
2775 *
2776 * @returns Strict VBox status code.
2777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2778 * @param pu64 Where to return the opcode quad word.
2779 */
2780DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2781{
2782 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2783 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2784 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2785
2786 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2787 pVCpu->iem.s.offOpcode = offOpcode + 2;
2788 return VINF_SUCCESS;
2789}
2790
2791#endif /* !IEM_WITH_SETJMP */
2792
2793/**
2794 * Fetches the next opcode word and zero extends it to a quad word, returns
2795 * automatically on failure.
2796 *
2797 * @param a_pu64 Where to return the opcode quad word.
2798 * @remark Implicitly references pVCpu.
2799 */
2800#ifndef IEM_WITH_SETJMP
2801# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2802 do \
2803 { \
2804 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2805 if (rcStrict2 != VINF_SUCCESS) \
2806 return rcStrict2; \
2807 } while (0)
2808#else
2809# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2810#endif
2811
2812
2813#ifndef IEM_WITH_SETJMP
2814/**
2815 * Fetches the next signed word from the opcode stream.
2816 *
2817 * @returns Strict VBox status code.
2818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2819 * @param pi16 Where to return the signed word.
2820 */
2821DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2822{
2823 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2824}
2825#endif /* !IEM_WITH_SETJMP */
2826
2827
2828/**
2829 * Fetches the next signed word from the opcode stream, returning automatically
2830 * on failure.
2831 *
2832 * @param a_pi16 Where to return the signed word.
2833 * @remark Implicitly references pVCpu.
2834 */
2835#ifndef IEM_WITH_SETJMP
2836# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2837 do \
2838 { \
2839 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2840 if (rcStrict2 != VINF_SUCCESS) \
2841 return rcStrict2; \
2842 } while (0)
2843#else
2844# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2845#endif
2846
2847#ifndef IEM_WITH_SETJMP
2848
2849/**
2850 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2851 *
2852 * @returns Strict VBox status code.
2853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2854 * @param pu32 Where to return the opcode dword.
2855 */
2856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2857{
2858 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2859 if (rcStrict == VINF_SUCCESS)
2860 {
2861 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2862# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2863 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2864# else
2865 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2866 pVCpu->iem.s.abOpcode[offOpcode + 1],
2867 pVCpu->iem.s.abOpcode[offOpcode + 2],
2868 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2869# endif
2870 pVCpu->iem.s.offOpcode = offOpcode + 4;
2871 }
2872 else
2873 *pu32 = 0;
2874 return rcStrict;
2875}
2876
2877
2878/**
2879 * Fetches the next opcode dword.
2880 *
2881 * @returns Strict VBox status code.
2882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2883 * @param pu32 Where to return the opcode double word.
2884 */
2885DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2886{
2887 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2888 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2889 {
2890 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2891# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2892 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2893# else
2894 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2895 pVCpu->iem.s.abOpcode[offOpcode + 1],
2896 pVCpu->iem.s.abOpcode[offOpcode + 2],
2897 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2898# endif
2899 return VINF_SUCCESS;
2900 }
2901 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2902}
2903
2904#else /* IEM_WITH_SETJMP */
2905
2906/**
2907 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2908 *
2909 * @returns The opcode dword.
2910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2911 */
2912DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2913{
2914# ifdef IEM_WITH_CODE_TLB
2915 uint32_t u32;
2916 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2917 return u32;
2918# else
2919 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2920 if (rcStrict == VINF_SUCCESS)
2921 {
2922 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2923 pVCpu->iem.s.offOpcode = offOpcode + 4;
2924# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2925 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2926# else
2927 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2928 pVCpu->iem.s.abOpcode[offOpcode + 1],
2929 pVCpu->iem.s.abOpcode[offOpcode + 2],
2930 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2931# endif
2932 }
2933 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2934# endif
2935}
2936
2937
2938/**
2939 * Fetches the next opcode dword, longjmp on error.
2940 *
2941 * @returns The opcode dword.
2942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2943 */
2944DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2945{
2946# ifdef IEM_WITH_CODE_TLB
2947 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2948 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2949 if (RT_LIKELY( pbBuf != NULL
2950 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2951 {
2952 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2953# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2954 return *(uint32_t const *)&pbBuf[offBuf];
2955# else
2956 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2957 pbBuf[offBuf + 1],
2958 pbBuf[offBuf + 2],
2959 pbBuf[offBuf + 3]);
2960# endif
2961 }
2962# else
2963 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2964 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2965 {
2966 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2967# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2968 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2969# else
2970 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2971 pVCpu->iem.s.abOpcode[offOpcode + 1],
2972 pVCpu->iem.s.abOpcode[offOpcode + 2],
2973 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2974# endif
2975 }
2976# endif
2977 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2978}
2979
2980#endif /* IEM_WITH_SETJMP */
2981
2982
2983/**
2984 * Fetches the next opcode dword, returns automatically on failure.
2985 *
2986 * @param a_pu32 Where to return the opcode dword.
2987 * @remark Implicitly references pVCpu.
2988 */
2989#ifndef IEM_WITH_SETJMP
2990# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2991 do \
2992 { \
2993 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2994 if (rcStrict2 != VINF_SUCCESS) \
2995 return rcStrict2; \
2996 } while (0)
2997#else
2998# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2999#endif
3000
3001#ifndef IEM_WITH_SETJMP
3002
3003/**
3004 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3005 *
3006 * @returns Strict VBox status code.
3007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3008 * @param pu64 Where to return the opcode quad word.
3009 */
3010DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3011{
3012 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3013 if (rcStrict == VINF_SUCCESS)
3014 {
3015 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3016 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3017 pVCpu->iem.s.abOpcode[offOpcode + 1],
3018 pVCpu->iem.s.abOpcode[offOpcode + 2],
3019 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 }
3022 else
3023 *pu64 = 0;
3024 return rcStrict;
3025}
3026
3027
3028/**
3029 * Fetches the next opcode dword, zero extending it to a quad word.
3030 *
3031 * @returns Strict VBox status code.
3032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3033 * @param pu64 Where to return the opcode quad word.
3034 */
3035DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3036{
3037 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3038 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3039 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3040
3041 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3042 pVCpu->iem.s.abOpcode[offOpcode + 1],
3043 pVCpu->iem.s.abOpcode[offOpcode + 2],
3044 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3045 pVCpu->iem.s.offOpcode = offOpcode + 4;
3046 return VINF_SUCCESS;
3047}
3048
3049#endif /* !IEM_WITH_SETJMP */
3050
3051
3052/**
3053 * Fetches the next opcode dword and zero extends it to a quad word, returns
3054 * automatically on failure.
3055 *
3056 * @param a_pu64 Where to return the opcode quad word.
3057 * @remark Implicitly references pVCpu.
3058 */
3059#ifndef IEM_WITH_SETJMP
3060# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3061 do \
3062 { \
3063 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3064 if (rcStrict2 != VINF_SUCCESS) \
3065 return rcStrict2; \
3066 } while (0)
3067#else
3068# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3069#endif
3070
3071
3072#ifndef IEM_WITH_SETJMP
3073/**
3074 * Fetches the next signed double word from the opcode stream.
3075 *
3076 * @returns Strict VBox status code.
3077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3078 * @param pi32 Where to return the signed double word.
3079 */
3080DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3081{
3082 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3083}
3084#endif
3085
3086/**
3087 * Fetches the next signed double word from the opcode stream, returning
3088 * automatically on failure.
3089 *
3090 * @param a_pi32 Where to return the signed double word.
3091 * @remark Implicitly references pVCpu.
3092 */
3093#ifndef IEM_WITH_SETJMP
3094# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3095 do \
3096 { \
3097 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3098 if (rcStrict2 != VINF_SUCCESS) \
3099 return rcStrict2; \
3100 } while (0)
3101#else
3102# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3103#endif
3104
3105#ifndef IEM_WITH_SETJMP
3106
3107/**
3108 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3109 *
3110 * @returns Strict VBox status code.
3111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3112 * @param pu64 Where to return the opcode qword.
3113 */
3114DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3115{
3116 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3117 if (rcStrict == VINF_SUCCESS)
3118 {
3119 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3120 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3121 pVCpu->iem.s.abOpcode[offOpcode + 1],
3122 pVCpu->iem.s.abOpcode[offOpcode + 2],
3123 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3124 pVCpu->iem.s.offOpcode = offOpcode + 4;
3125 }
3126 else
3127 *pu64 = 0;
3128 return rcStrict;
3129}
3130
3131
3132/**
3133 * Fetches the next opcode dword, sign extending it into a quad word.
3134 *
3135 * @returns Strict VBox status code.
3136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3137 * @param pu64 Where to return the opcode quad word.
3138 */
3139DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3140{
3141 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3142 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3143 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3144
3145 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3146 pVCpu->iem.s.abOpcode[offOpcode + 1],
3147 pVCpu->iem.s.abOpcode[offOpcode + 2],
3148 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3149 *pu64 = i32;
3150 pVCpu->iem.s.offOpcode = offOpcode + 4;
3151 return VINF_SUCCESS;
3152}
3153
3154#endif /* !IEM_WITH_SETJMP */
3155
3156
3157/**
3158 * Fetches the next opcode double word and sign extends it to a quad word,
3159 * returns automatically on failure.
3160 *
3161 * @param a_pu64 Where to return the opcode quad word.
3162 * @remark Implicitly references pVCpu.
3163 */
3164#ifndef IEM_WITH_SETJMP
3165# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3166 do \
3167 { \
3168 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3169 if (rcStrict2 != VINF_SUCCESS) \
3170 return rcStrict2; \
3171 } while (0)
3172#else
3173# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3174#endif
3175
3176#ifndef IEM_WITH_SETJMP
3177
3178/**
3179 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3180 *
3181 * @returns Strict VBox status code.
3182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3183 * @param pu64 Where to return the opcode qword.
3184 */
3185DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3186{
3187 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3188 if (rcStrict == VINF_SUCCESS)
3189 {
3190 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3191# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3192 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3193# else
3194 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3195 pVCpu->iem.s.abOpcode[offOpcode + 1],
3196 pVCpu->iem.s.abOpcode[offOpcode + 2],
3197 pVCpu->iem.s.abOpcode[offOpcode + 3],
3198 pVCpu->iem.s.abOpcode[offOpcode + 4],
3199 pVCpu->iem.s.abOpcode[offOpcode + 5],
3200 pVCpu->iem.s.abOpcode[offOpcode + 6],
3201 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3202# endif
3203 pVCpu->iem.s.offOpcode = offOpcode + 8;
3204 }
3205 else
3206 *pu64 = 0;
3207 return rcStrict;
3208}
3209
3210
3211/**
3212 * Fetches the next opcode qword.
3213 *
3214 * @returns Strict VBox status code.
3215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3216 * @param pu64 Where to return the opcode qword.
3217 */
3218DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3219{
3220 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3221 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3222 {
3223# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3224 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3225# else
3226 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3227 pVCpu->iem.s.abOpcode[offOpcode + 1],
3228 pVCpu->iem.s.abOpcode[offOpcode + 2],
3229 pVCpu->iem.s.abOpcode[offOpcode + 3],
3230 pVCpu->iem.s.abOpcode[offOpcode + 4],
3231 pVCpu->iem.s.abOpcode[offOpcode + 5],
3232 pVCpu->iem.s.abOpcode[offOpcode + 6],
3233 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3234# endif
3235 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3236 return VINF_SUCCESS;
3237 }
3238 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3239}
3240
3241#else /* IEM_WITH_SETJMP */
3242
3243/**
3244 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3245 *
3246 * @returns The opcode qword.
3247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3248 */
3249DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3250{
3251# ifdef IEM_WITH_CODE_TLB
3252 uint64_t u64;
3253 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3254 return u64;
3255# else
3256 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3257 if (rcStrict == VINF_SUCCESS)
3258 {
3259 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3260 pVCpu->iem.s.offOpcode = offOpcode + 8;
3261# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3262 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3263# else
3264 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3265 pVCpu->iem.s.abOpcode[offOpcode + 1],
3266 pVCpu->iem.s.abOpcode[offOpcode + 2],
3267 pVCpu->iem.s.abOpcode[offOpcode + 3],
3268 pVCpu->iem.s.abOpcode[offOpcode + 4],
3269 pVCpu->iem.s.abOpcode[offOpcode + 5],
3270 pVCpu->iem.s.abOpcode[offOpcode + 6],
3271 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3272# endif
3273 }
3274 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3275# endif
3276}
3277
3278
3279/**
3280 * Fetches the next opcode qword, longjmp on error.
3281 *
3282 * @returns The opcode qword.
3283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3284 */
3285DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3286{
3287# ifdef IEM_WITH_CODE_TLB
3288 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3289 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3290 if (RT_LIKELY( pbBuf != NULL
3291 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3292 {
3293 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3294# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3295 return *(uint64_t const *)&pbBuf[offBuf];
3296# else
3297 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3298 pbBuf[offBuf + 1],
3299 pbBuf[offBuf + 2],
3300 pbBuf[offBuf + 3],
3301 pbBuf[offBuf + 4],
3302 pbBuf[offBuf + 5],
3303 pbBuf[offBuf + 6],
3304 pbBuf[offBuf + 7]);
3305# endif
3306 }
3307# else
3308 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3309 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3310 {
3311 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3312# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3313 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3314# else
3315 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3316 pVCpu->iem.s.abOpcode[offOpcode + 1],
3317 pVCpu->iem.s.abOpcode[offOpcode + 2],
3318 pVCpu->iem.s.abOpcode[offOpcode + 3],
3319 pVCpu->iem.s.abOpcode[offOpcode + 4],
3320 pVCpu->iem.s.abOpcode[offOpcode + 5],
3321 pVCpu->iem.s.abOpcode[offOpcode + 6],
3322 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3323# endif
3324 }
3325# endif
3326 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3327}
3328
3329#endif /* IEM_WITH_SETJMP */
3330
3331/**
3332 * Fetches the next opcode quad word, returns automatically on failure.
3333 *
3334 * @param a_pu64 Where to return the opcode quad word.
3335 * @remark Implicitly references pVCpu.
3336 */
3337#ifndef IEM_WITH_SETJMP
3338# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3339 do \
3340 { \
3341 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3342 if (rcStrict2 != VINF_SUCCESS) \
3343 return rcStrict2; \
3344 } while (0)
3345#else
3346# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3347#endif
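
/*
 * Sketch of how the dword/qword immediate fetchers are typically combined
 * (hypothetical helper, illustration only): in 64-bit code most immediates
 * are encoded as an imm32 that gets sign extended to 64 bits, while
 * MOV reg64, imm64 style encodings carry a full 8-byte immediate.
 *
 * @code
 *     IEM_STATIC VBOXSTRICTRC iemOpHlpExampleFetchImm(PVMCPU pVCpu, bool fFullImm64, uint64_t *puImm)
 *     {
 *         if (fFullImm64)
 *             IEM_OPCODE_GET_NEXT_U64(puImm);         // full 8-byte immediate
 *         else
 *             IEM_OPCODE_GET_NEXT_S32_SX_U64(puImm);  // imm32, sign extended to 64 bits
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */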
3348
3349
3350/** @name Misc Worker Functions.
3351 * @{
3352 */
3353
3354/**
3355 * Gets the exception class for the specified exception vector.
3356 *
3357 * @returns The class of the specified exception.
3358 * @param uVector The exception vector.
3359 */
3360IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3361{
3362 Assert(uVector <= X86_XCPT_LAST);
3363 switch (uVector)
3364 {
3365 case X86_XCPT_DE:
3366 case X86_XCPT_TS:
3367 case X86_XCPT_NP:
3368 case X86_XCPT_SS:
3369 case X86_XCPT_GP:
3370 case X86_XCPT_SX: /* AMD only */
3371 return IEMXCPTCLASS_CONTRIBUTORY;
3372
3373 case X86_XCPT_PF:
3374 case X86_XCPT_VE: /* Intel only */
3375 return IEMXCPTCLASS_PAGE_FAULT;
3376
3377 case X86_XCPT_DF:
3378 return IEMXCPTCLASS_DOUBLE_FAULT;
3379 }
3380 return IEMXCPTCLASS_BENIGN;
3381}
3382
3383
3384/**
3385 * Evaluates how to handle an exception caused during delivery of another event
3386 * (exception / interrupt).
3387 *
3388 * @returns How to handle the recursive exception.
3389 * @param pVCpu The cross context virtual CPU structure of the
3390 * calling thread.
3391 * @param fPrevFlags The flags of the previous event.
3392 * @param uPrevVector The vector of the previous event.
3393 * @param fCurFlags The flags of the current exception.
3394 * @param uCurVector The vector of the current exception.
3395 * @param pfXcptRaiseInfo Where to store additional information about the
3396 * exception condition. Optional.
3397 */
3398VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3399 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3400{
3401 /*
3402 * Only CPU exceptions can be raised while delivering other events; software interrupt
3403 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3404 */
3405 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3406 Assert(pVCpu); RT_NOREF(pVCpu);
3407 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3408
3409 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3410 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3411 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3412 {
3413 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3414 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3415 {
3416 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3417 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3418 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3419 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3420 {
3421 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3422 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3423 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3424 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3425 uCurVector, pVCpu->cpum.GstCtx.cr2));
3426 }
3427 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3428 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3429 {
3430 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3431 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3432 }
3433 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3434 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3435 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3436 {
3437 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3438 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3439 }
3440 }
3441 else
3442 {
3443 if (uPrevVector == X86_XCPT_NMI)
3444 {
3445 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3446 if (uCurVector == X86_XCPT_PF)
3447 {
3448 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3449 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3450 }
3451 }
3452 else if ( uPrevVector == X86_XCPT_AC
3453 && uCurVector == X86_XCPT_AC)
3454 {
3455 enmRaise = IEMXCPTRAISE_CPU_HANG;
3456 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3457 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3458 }
3459 }
3460 }
3461 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3462 {
3463 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3464 if (uCurVector == X86_XCPT_PF)
3465 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3466 }
3467 else
3468 {
3469 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3470 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3471 }
3472
3473 if (pfXcptRaiseInfo)
3474 *pfXcptRaiseInfo = fRaiseInfo;
3475 return enmRaise;
3476}
3477
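/*
 * Usage sketch (the caller context is illustrative only): a typical caller
 * asks how to treat a #GP raised while a #PF was being delivered; with the
 * arguments below the function returns IEMXCPTRAISE_DOUBLE_FAULT and sets
 * IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT in fRaiseInfo.
 *
 * @code
 *     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                            IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                            &fRaiseInfo);
 *     if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *     {
 *         // raise #DF instead of delivering the #GP
 *     }
 * @endcode
 */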
3478
3479/**
3480 * Enters the CPU shutdown state initiated by a triple fault or other
3481 * unrecoverable conditions.
3482 *
3483 * @returns Strict VBox status code.
3484 * @param pVCpu The cross context virtual CPU structure of the
3485 * calling thread.
3486 */
3487IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3488{
3489 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3490 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3491
3492 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3493 {
3494 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3495 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3496 }
3497
3498 RT_NOREF(pVCpu);
3499 return VINF_EM_TRIPLE_FAULT;
3500}
3501
3502
3503/**
3504 * Validates a new SS segment.
3505 *
3506 * @returns VBox strict status code.
3507 * @param pVCpu The cross context virtual CPU structure of the
3508 * calling thread.
3509 * @param NewSS The new SS selector.
3510 * @param uCpl The CPL to load the stack for.
3511 * @param pDesc Where to return the descriptor.
3512 */
3513IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3514{
3515 /* Null selectors are not allowed (we're not called for dispatching
3516 interrupts with SS=0 in long mode). */
3517 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3518 {
3519 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3520 return iemRaiseTaskSwitchFault0(pVCpu);
3521 }
3522
3523 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3524 if ((NewSS & X86_SEL_RPL) != uCpl)
3525 {
3526 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3527 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3528 }
3529
3530 /*
3531 * Read the descriptor.
3532 */
3533 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3534 if (rcStrict != VINF_SUCCESS)
3535 return rcStrict;
3536
3537 /*
3538 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3539 */
3540 if (!pDesc->Legacy.Gen.u1DescType)
3541 {
3542 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3543 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3544 }
3545
3546 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3547 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3548 {
3549 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3550 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3551 }
3552 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3553 {
3554 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3555 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3556 }
3557
3558 /* Is it there? */
3559 /** @todo testcase: Is this checked before the canonical / limit check below? */
3560 if (!pDesc->Legacy.Gen.u1Present)
3561 {
3562 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3563 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3564 }
3565
3566 return VINF_SUCCESS;
3567}
3568
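/*
 * Usage sketch (uNewSS and uNewCpl are illustrative locals): callers validate
 * the selector, propagate any strict status code (the appropriate #TS/#NP has
 * already been raised by then), and use the returned descriptor to load the
 * hidden SS parts.
 *
 * @code
 *     IEMSELDESC   DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... commit SS.Sel, base, limit and attributes from DescSS ...
 * @endcode
 */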
3569
3570/**
3571 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3572 * not.
3573 *
3574 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3575 */
3576#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3577# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3578#else
3579# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3580#endif
3581
3582/**
3583 * Updates the EFLAGS in the correct manner wrt. PATM.
3584 *
3585 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3586 * @param a_fEfl The new EFLAGS.
3587 */
3588#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3589# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3590#else
3591# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3592#endif
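
/*
 * Sketch of the intended read-modify-write pattern for these wrappers (the
 * same pattern the real-mode exception delivery code below uses); always going
 * through the macros keeps the PATM-aware raw-mode variant correct.
 *
 * @code
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);  // e.g. interrupt/exception entry
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 * @endcode
 */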
3593
3594
3595/** @} */
3596
3597/** @name Raising Exceptions.
3598 *
3599 * @{
3600 */
3601
3602
3603/**
3604 * Loads the specified stack far pointer from the TSS.
3605 *
3606 * @returns VBox strict status code.
3607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3608 * @param uCpl The CPL to load the stack for.
3609 * @param pSelSS Where to return the new stack segment.
3610 * @param puEsp Where to return the new stack pointer.
3611 */
3612IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3613{
3614 VBOXSTRICTRC rcStrict;
3615 Assert(uCpl < 4);
3616
3617 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3618 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3619 {
3620 /*
3621 * 16-bit TSS (X86TSS16).
3622 */
3623 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3624 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3625 {
3626 uint32_t off = uCpl * 4 + 2;
3627 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3628 {
3629 /** @todo check actual access pattern here. */
3630 uint32_t u32Tmp = 0; /* gcc maybe... */
3631 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3632 if (rcStrict == VINF_SUCCESS)
3633 {
3634 *puEsp = RT_LOWORD(u32Tmp);
3635 *pSelSS = RT_HIWORD(u32Tmp);
3636 return VINF_SUCCESS;
3637 }
3638 }
3639 else
3640 {
3641 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3642 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3643 }
3644 break;
3645 }
3646
3647 /*
3648 * 32-bit TSS (X86TSS32).
3649 */
3650 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3651 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3652 {
3653 uint32_t off = uCpl * 8 + 4;
3654 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3655 {
3656/** @todo check actual access pattern here. */
3657 uint64_t u64Tmp;
3658 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3659 if (rcStrict == VINF_SUCCESS)
3660 {
3661 *puEsp = u64Tmp & UINT32_MAX;
3662 *pSelSS = (RTSEL)(u64Tmp >> 32);
3663 return VINF_SUCCESS;
3664 }
3665 }
3666 else
3667 {
3668 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3669 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3670 }
3671 break;
3672 }
3673
3674 default:
3675 AssertFailed();
3676 rcStrict = VERR_IEM_IPE_4;
3677 break;
3678 }
3679
3680 *puEsp = 0; /* make gcc happy */
3681 *pSelSS = 0; /* make gcc happy */
3682 return rcStrict;
3683}
3684
3685
3686/**
3687 * Loads the specified stack pointer from the 64-bit TSS.
3688 *
3689 * @returns VBox strict status code.
3690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3691 * @param uCpl The CPL to load the stack for.
3692 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3693 * @param puRsp Where to return the new stack pointer.
3694 */
3695IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3696{
3697 Assert(uCpl < 4);
3698 Assert(uIst < 8);
3699 *puRsp = 0; /* make gcc happy */
3700
3701 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3702 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3703
3704 uint32_t off;
3705 if (uIst)
3706 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3707 else
3708 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3709 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3710 {
3711 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3712 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3713 }
3714
3715 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3716}
3717
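/*
 * Usage sketch (illustrative only): fetch the ring-0 stack pointer for an
 * exception that is dispatched without an IST entry, propagating any failure.
 *
 * @code
 *     uint64_t     uNewRsp  = 0;
 *     VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss64(pVCpu, 0, 0, &uNewRsp); // uCpl=0, uIst=0 (no IST)
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 * @endcode
 */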
3718
3719/**
3720 * Adjusts the CPU state according to the exception being raised.
3721 *
3722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3723 * @param u8Vector The exception that has been raised.
3724 */
3725DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3726{
3727 switch (u8Vector)
3728 {
3729 case X86_XCPT_DB:
3730 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3731 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3732 break;
3733 /** @todo Read the AMD and Intel exception reference... */
3734 }
3735}
3736
3737
3738/**
3739 * Implements exceptions and interrupts for real mode.
3740 *
3741 * @returns VBox strict status code.
3742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3743 * @param cbInstr The number of bytes to offset rIP by in the return
3744 * address.
3745 * @param u8Vector The interrupt / exception vector number.
3746 * @param fFlags The flags.
3747 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3748 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3749 */
3750IEM_STATIC VBOXSTRICTRC
3751iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3752 uint8_t cbInstr,
3753 uint8_t u8Vector,
3754 uint32_t fFlags,
3755 uint16_t uErr,
3756 uint64_t uCr2)
3757{
3758 NOREF(uErr); NOREF(uCr2);
3759 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3760
3761 /*
3762 * Read the IDT entry.
3763 */
3764 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3765 {
3766 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3767 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3768 }
3769 RTFAR16 Idte;
3770 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3771 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3772 {
3773 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3774 return rcStrict;
3775 }
3776
3777 /*
3778 * Push the stack frame.
3779 */
3780 uint16_t *pu16Frame;
3781 uint64_t uNewRsp;
3782 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3783 if (rcStrict != VINF_SUCCESS)
3784 return rcStrict;
3785
3786 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3787#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3788 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3789 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3790 fEfl |= UINT16_C(0xf000);
3791#endif
3792 pu16Frame[2] = (uint16_t)fEfl;
3793 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3794 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3795 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3796 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3797 return rcStrict;
3798
3799 /*
3800 * Load the vector address into cs:ip and make exception specific state
3801 * adjustments.
3802 */
3803 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3804 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3805 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3806 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3807 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3808 pVCpu->cpum.GstCtx.rip = Idte.off;
3809 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3810 IEMMISC_SET_EFL(pVCpu, fEfl);
3811
3812 /** @todo do we actually do this in real mode? */
3813 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3814 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3815
3816 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3817}
3818
3819
3820/**
3821 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 */
3826IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3827{
3828 pSReg->Sel = 0;
3829 pSReg->ValidSel = 0;
3830 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3831 {
3832 /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes */
3833 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3834 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3835 }
3836 else
3837 {
3838 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3839 /** @todo check this on AMD-V */
3840 pSReg->u64Base = 0;
3841 pSReg->u32Limit = 0;
3842 }
3843}
3844
3845
3846/**
3847 * Loads a segment selector during a task switch in V8086 mode.
3848 *
3849 * @param pSReg Pointer to the segment register.
3850 * @param uSel The selector value to load.
3851 */
3852IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3853{
3854 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3855 pSReg->Sel = uSel;
3856 pSReg->ValidSel = uSel;
3857 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3858 pSReg->u64Base = uSel << 4;
3859 pSReg->u32Limit = 0xffff;
3860 pSReg->Attr.u = 0xf3;
3861}
3862
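/*
 * Worked example (the values follow directly from the assignments above):
 * loading selector 0x1234 in V8086 mode yields base 0x12340 (selector << 4),
 * limit 0xffff and attribute byte 0xf3 (present, DPL=3, accessed read/write
 * data segment).
 *
 * @code
 *     CPUMSELREG SRegTmp;
 *     iemHlpLoadSelectorInV86Mode(&SRegTmp, 0x1234);
 *     Assert(SRegTmp.u64Base  == UINT64_C(0x12340));
 *     Assert(SRegTmp.u32Limit == UINT32_C(0xffff));
 *     Assert(SRegTmp.Attr.u   == 0xf3);
 * @endcode
 */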
3863
3864/**
3865 * Loads a NULL data selector into a selector register, both the hidden and
3866 * visible parts, in protected mode.
3867 *
3868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3869 * @param pSReg Pointer to the segment register.
3870 * @param uRpl The RPL.
3871 */
3872IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3873{
3874 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3875 * data selector in protected mode. */
3876 pSReg->Sel = uRpl;
3877 pSReg->ValidSel = uRpl;
3878 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3879 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3880 {
3881 /* VT-x (Intel 3960x) observed doing something like this. */
3882 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3883 pSReg->u32Limit = UINT32_MAX;
3884 pSReg->u64Base = 0;
3885 }
3886 else
3887 {
3888 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3889 pSReg->u32Limit = 0;
3890 pSReg->u64Base = 0;
3891 }
3892}
3893
3894
3895/**
3896 * Loads a segment selector during a task switch in protected mode.
3897 *
3898 * In this task switch scenario, we would throw \#TS exceptions rather than
3899 * \#GPs.
3900 *
3901 * @returns VBox strict status code.
3902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3903 * @param pSReg Pointer to the segment register.
3904 * @param uSel The new selector value.
3905 *
3906 * @remarks This does _not_ handle CS or SS.
3907 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3908 */
3909IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3910{
3911 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3912
3913 /* Null data selector. */
3914 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3915 {
3916 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3918 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3919 return VINF_SUCCESS;
3920 }
3921
3922 /* Fetch the descriptor. */
3923 IEMSELDESC Desc;
3924 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3925 if (rcStrict != VINF_SUCCESS)
3926 {
3927 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3928 VBOXSTRICTRC_VAL(rcStrict)));
3929 return rcStrict;
3930 }
3931
3932 /* Must be a data segment or readable code segment. */
3933 if ( !Desc.Legacy.Gen.u1DescType
3934 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3935 {
3936 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3937 Desc.Legacy.Gen.u4Type));
3938 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3939 }
3940
3941 /* Check privileges for data segments and non-conforming code segments. */
3942 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3943 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3944 {
3945 /* The RPL and the new CPL must be less than or equal to the DPL. */
3946 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3947 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3948 {
3949 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3950 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3951 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3952 }
3953 }
3954
3955 /* Is it there? */
3956 if (!Desc.Legacy.Gen.u1Present)
3957 {
3958 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3959 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3960 }
3961
3962 /* The base and limit. */
3963 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3964 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3965
3966 /*
3967 * Ok, everything checked out fine. Now set the accessed bit before
3968 * committing the result into the registers.
3969 */
3970 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3971 {
3972 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3973 if (rcStrict != VINF_SUCCESS)
3974 return rcStrict;
3975 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3976 }
3977
3978 /* Commit */
3979 pSReg->Sel = uSel;
3980 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3981 pSReg->u32Limit = cbLimit;
3982 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3983 pSReg->ValidSel = uSel;
3984 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3985 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3986 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3987
3988 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3989 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3990 return VINF_SUCCESS;
3991}
3992
3993
3994/**
3995 * Performs a task switch.
3996 *
3997 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3998 * caller is responsible for performing the necessary checks (like DPL, TSS
3999 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4000 * reference for JMP, CALL, IRET.
4001 *
4002 * If the task switch is due to a software interrupt or hardware exception,
4003 * the caller is responsible for validating the TSS selector and descriptor. See
4004 * Intel Instruction reference for INT n.
4005 *
4006 * @returns VBox strict status code.
4007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4008 * @param enmTaskSwitch The cause of the task switch.
4009 * @param uNextEip The EIP effective after the task switch.
4010 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4011 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4012 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4013 * @param SelTSS The TSS selector of the new task.
4014 * @param pNewDescTSS Pointer to the new TSS descriptor.
4015 */
4016IEM_STATIC VBOXSTRICTRC
4017iemTaskSwitch(PVMCPU pVCpu,
4018 IEMTASKSWITCH enmTaskSwitch,
4019 uint32_t uNextEip,
4020 uint32_t fFlags,
4021 uint16_t uErr,
4022 uint64_t uCr2,
4023 RTSEL SelTSS,
4024 PIEMSELDESC pNewDescTSS)
4025{
4026 Assert(!IEM_IS_REAL_MODE(pVCpu));
4027 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4028 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4029
4030 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4031 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4032 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4033 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4035
4036 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4037 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4038
4039 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4040 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4041
4042 /* Update CR2 in case it's a page-fault. */
4043 /** @todo This should probably be done much earlier in IEM/PGM. See
4044 * @bugref{5653#c49}. */
4045 if (fFlags & IEM_XCPT_FLAGS_CR2)
4046 pVCpu->cpum.GstCtx.cr2 = uCr2;
4047
4048 /*
4049 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4050 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4051 */
4052 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4053 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4054 if (uNewTSSLimit < uNewTSSLimitMin)
4055 {
4056 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4057 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4058 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4059 }
4060
4061 /*
4062 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4063 * The new TSS must have been read and validated (DPL, limits etc.) before a
4064 * task-switch VM-exit commences.
4065 *
4066 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4067 */
4068 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4069 {
4070 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4071 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4072 }
4073
4074 /*
4075 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4076 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4077 */
4078 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4079 {
4080 uint32_t const uExitInfo1 = SelTSS;
4081 uint32_t uExitInfo2 = uErr;
4082 switch (enmTaskSwitch)
4083 {
4084 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4085 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4086 default: break;
4087 }
4088 if (fFlags & IEM_XCPT_FLAGS_ERR)
4089 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4090 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4091 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4092
4093 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4094 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4095 RT_NOREF2(uExitInfo1, uExitInfo2);
4096 }
4097
4098 /*
4099 * Check the current TSS limit. The last data written to the current TSS during the
4100 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4101 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4102 *
4103 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4104 * end up with smaller than "legal" TSS limits.
4105 */
4106 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4107 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4108 if (uCurTSSLimit < uCurTSSLimitMin)
4109 {
4110 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4111 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4112 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4113 }
4114
4115 /*
4116 * Verify that the new TSS can be accessed and map it. Map only the required contents
4117 * and not the entire TSS.
4118 */
4119 void *pvNewTSS;
4120 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4121 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4122 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4123 /** @todo Handle the case where the TSS crosses a page boundary. Intel specifies that it may
4124 * not perform correct translation if this happens. See Intel spec. 7.2.1
4125 * "Task-State Segment" */
4126 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4127 if (rcStrict != VINF_SUCCESS)
4128 {
4129 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4130 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4131 return rcStrict;
4132 }
4133
4134 /*
4135 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4136 */
4137 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4138 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4139 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4140 {
4141 PX86DESC pDescCurTSS;
4142 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4143 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4147 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150
4151 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4152 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4156 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4161 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4162 {
4163 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4164 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4165 u32EFlags &= ~X86_EFL_NT;
4166 }
4167 }
4168
4169 /*
4170 * Save the CPU state into the current TSS.
4171 */
4172 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4173 if (GCPtrNewTSS == GCPtrCurTSS)
4174 {
4175 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4176 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4177 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4178 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4179 pVCpu->cpum.GstCtx.ldtr.Sel));
4180 }
4181 if (fIsNewTSS386)
4182 {
4183 /*
4184 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4185 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4186 */
4187 void *pvCurTSS32;
4188 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4189 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4190 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4191 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4192 if (rcStrict != VINF_SUCCESS)
4193 {
4194 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4195 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4196 return rcStrict;
4197 }
4198
4199 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4200 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4201 pCurTSS32->eip = uNextEip;
4202 pCurTSS32->eflags = u32EFlags;
4203 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4204 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4205 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4206 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4207 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4208 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4209 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4210 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4211 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4212 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4213 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4214 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4215 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4216 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4217
4218 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4219 if (rcStrict != VINF_SUCCESS)
4220 {
4221 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4222 VBOXSTRICTRC_VAL(rcStrict)));
4223 return rcStrict;
4224 }
4225 }
4226 else
4227 {
4228 /*
4229 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4230 */
4231 void *pvCurTSS16;
4232 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4233 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4234 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4235 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4236 if (rcStrict != VINF_SUCCESS)
4237 {
4238 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4239 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4240 return rcStrict;
4241 }
4242
4243 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4244 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4245 pCurTSS16->ip = uNextEip;
4246 pCurTSS16->flags = u32EFlags;
4247 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4248 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4249 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4250 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4251 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4252 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4253 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4254 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4255 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4256 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4257 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4258 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4259
4260 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4261 if (rcStrict != VINF_SUCCESS)
4262 {
4263 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4264 VBOXSTRICTRC_VAL(rcStrict)));
4265 return rcStrict;
4266 }
4267 }
4268
4269 /*
4270 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4271 */
4272 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4273 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4274 {
4275 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4276 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4277 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4278 }
4279
4280 /*
4281 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4282 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4283 */
4284 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4285 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4286 bool fNewDebugTrap;
4287 if (fIsNewTSS386)
4288 {
4289 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4290 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4291 uNewEip = pNewTSS32->eip;
4292 uNewEflags = pNewTSS32->eflags;
4293 uNewEax = pNewTSS32->eax;
4294 uNewEcx = pNewTSS32->ecx;
4295 uNewEdx = pNewTSS32->edx;
4296 uNewEbx = pNewTSS32->ebx;
4297 uNewEsp = pNewTSS32->esp;
4298 uNewEbp = pNewTSS32->ebp;
4299 uNewEsi = pNewTSS32->esi;
4300 uNewEdi = pNewTSS32->edi;
4301 uNewES = pNewTSS32->es;
4302 uNewCS = pNewTSS32->cs;
4303 uNewSS = pNewTSS32->ss;
4304 uNewDS = pNewTSS32->ds;
4305 uNewFS = pNewTSS32->fs;
4306 uNewGS = pNewTSS32->gs;
4307 uNewLdt = pNewTSS32->selLdt;
4308 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4309 }
4310 else
4311 {
4312 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4313 uNewCr3 = 0;
4314 uNewEip = pNewTSS16->ip;
4315 uNewEflags = pNewTSS16->flags;
4316 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4317 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4318 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4319 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4320 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4321 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4322 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4323 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4324 uNewES = pNewTSS16->es;
4325 uNewCS = pNewTSS16->cs;
4326 uNewSS = pNewTSS16->ss;
4327 uNewDS = pNewTSS16->ds;
4328 uNewFS = 0;
4329 uNewGS = 0;
4330 uNewLdt = pNewTSS16->selLdt;
4331 fNewDebugTrap = false;
4332 }
4333
4334 if (GCPtrNewTSS == GCPtrCurTSS)
4335 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4336 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4337
4338 /*
4339 * We're done accessing the new TSS.
4340 */
4341 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4342 if (rcStrict != VINF_SUCCESS)
4343 {
4344 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4345 return rcStrict;
4346 }
4347
4348 /*
4349 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4350 */
4351 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4352 {
4353 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4354 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4355 if (rcStrict != VINF_SUCCESS)
4356 {
4357 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4358 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4359 return rcStrict;
4360 }
4361
4362 /* Check that the descriptor indicates the new TSS is available (not busy). */
4363 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4364 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4365 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4366
4367 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4368 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4369 if (rcStrict != VINF_SUCCESS)
4370 {
4371 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4372 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4373 return rcStrict;
4374 }
4375 }
4376
4377 /*
4378 * From this point on, we're technically in the new task. Any exception raised from here on
4379 * is deferred until the task switch completes, but is delivered before any instruction of the new task executes.
4380 */
4381 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4382 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4383 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4384 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4385 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4386 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4387 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4388
4389 /* Set the busy bit in TR. */
4390 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4391 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4392 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4393 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4394 {
4395 uNewEflags |= X86_EFL_NT;
4396 }
4397
4398 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4399 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4400 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4401
4402 pVCpu->cpum.GstCtx.eip = uNewEip;
4403 pVCpu->cpum.GstCtx.eax = uNewEax;
4404 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4405 pVCpu->cpum.GstCtx.edx = uNewEdx;
4406 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4407 pVCpu->cpum.GstCtx.esp = uNewEsp;
4408 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4409 pVCpu->cpum.GstCtx.esi = uNewEsi;
4410 pVCpu->cpum.GstCtx.edi = uNewEdi;
4411
4412 uNewEflags &= X86_EFL_LIVE_MASK;
4413 uNewEflags |= X86_EFL_RA1_MASK;
4414 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4415
4416 /*
4417 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4418 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4419 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4420 */
4421 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4422 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4423
4424 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4425 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4426
4427 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4428 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4429
4430 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4431 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4432
4433 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4434 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4435
4436 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4437 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4438 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4439
4440 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4441 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4442 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4443 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4444
4445 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4446 {
4447 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4448 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4449 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4451 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4452 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4453 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4454 }
4455
4456 /*
4457 * Switch CR3 for the new task.
4458 */
4459 if ( fIsNewTSS386
4460 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4461 {
4462 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4463 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4464 AssertRCSuccessReturn(rc, rc);
4465
4466 /* Inform PGM. */
4467 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4468 AssertRCReturn(rc, rc);
4469 /* ignore informational status codes */
4470
4471 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4472 }
4473
4474 /*
4475 * Switch LDTR for the new task.
4476 */
4477 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4478 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4479 else
4480 {
4481 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4482
4483 IEMSELDESC DescNewLdt;
4484 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4485 if (rcStrict != VINF_SUCCESS)
4486 {
4487 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4488 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4489 return rcStrict;
4490 }
4491 if ( !DescNewLdt.Legacy.Gen.u1Present
4492 || DescNewLdt.Legacy.Gen.u1DescType
4493 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4494 {
4495 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4496 uNewLdt, DescNewLdt.Legacy.u));
4497 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4498 }
4499
4500 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4501 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4502 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4503 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4504 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4505 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4506 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4507 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4508 }
4509
4510 IEMSELDESC DescSS;
4511 if (IEM_IS_V86_MODE(pVCpu))
4512 {
4513 pVCpu->iem.s.uCpl = 3;
4514 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4515 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4517 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4518 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4519 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4520
4521 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4522 DescSS.Legacy.u = 0;
4523 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4524 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4525 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4526 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4527 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4528 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4529 DescSS.Legacy.Gen.u2Dpl = 3;
4530 }
4531 else
4532 {
4533 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4534
4535 /*
4536 * Load the stack segment for the new task.
4537 */
4538 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4539 {
4540 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4541 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4542 }
4543
4544 /* Fetch the descriptor. */
4545 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4546 if (rcStrict != VINF_SUCCESS)
4547 {
4548 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4549 VBOXSTRICTRC_VAL(rcStrict)));
4550 return rcStrict;
4551 }
4552
4553 /* SS must be a data segment and writable. */
4554 if ( !DescSS.Legacy.Gen.u1DescType
4555 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4556 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4557 {
4558 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4559 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4560 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4561 }
4562
4563 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4564 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4565 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4566 {
4567 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4568 uNewCpl));
4569 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4570 }
4571
4572 /* Is it there? */
4573 if (!DescSS.Legacy.Gen.u1Present)
4574 {
4575 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4576 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4577 }
4578
4579 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4580 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4581
4582 /* Set the accessed bit before committing the result into SS. */
4583 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4584 {
4585 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4586 if (rcStrict != VINF_SUCCESS)
4587 return rcStrict;
4588 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4589 }
4590
4591 /* Commit SS. */
4592 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4593 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4594 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4595 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4596 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4597 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4598 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4599
4600 /* CPL has changed, update IEM before loading rest of segments. */
4601 pVCpu->iem.s.uCpl = uNewCpl;
4602
4603 /*
4604 * Load the data segments for the new task.
4605 */
4606 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4607 if (rcStrict != VINF_SUCCESS)
4608 return rcStrict;
4609 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4610 if (rcStrict != VINF_SUCCESS)
4611 return rcStrict;
4612 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4613 if (rcStrict != VINF_SUCCESS)
4614 return rcStrict;
4615 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4616 if (rcStrict != VINF_SUCCESS)
4617 return rcStrict;
4618
4619 /*
4620 * Load the code segment for the new task.
4621 */
4622 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4623 {
4624 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4625 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4626 }
4627
4628 /* Fetch the descriptor. */
4629 IEMSELDESC DescCS;
4630 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4631 if (rcStrict != VINF_SUCCESS)
4632 {
4633 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4634 return rcStrict;
4635 }
4636
4637 /* CS must be a code segment. */
4638 if ( !DescCS.Legacy.Gen.u1DescType
4639 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4640 {
4641 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4642 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4643 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4644 }
4645
4646 /* For conforming CS, DPL must be less than or equal to the RPL. */
4647 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4648 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4649 {
4650 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4651 DescCS.Legacy.Gen.u2Dpl));
4652 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4653 }
4654
4655 /* For non-conforming CS, DPL must match RPL. */
4656 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4657 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4658 {
4659 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4660 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4661 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4662 }
4663
4664 /* Is it there? */
4665 if (!DescCS.Legacy.Gen.u1Present)
4666 {
4667 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4668 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4669 }
4670
4671 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4672 u64Base = X86DESC_BASE(&DescCS.Legacy);
4673
4674 /* Set the accessed bit before committing the result into CS. */
4675 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4676 {
4677 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4678 if (rcStrict != VINF_SUCCESS)
4679 return rcStrict;
4680 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4681 }
4682
4683 /* Commit CS. */
4684 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4685 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4686 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4687 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4688 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4689 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4690 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4691 }
4692
4693 /** @todo Debug trap. */
4694 if (fIsNewTSS386 && fNewDebugTrap)
4695 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4696
4697 /*
4698 * Construct the error code masks based on what caused this task switch.
4699 * See Intel Instruction reference for INT.
4700 */
4701 uint16_t uExt;
4702 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4703 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4704 {
4705 uExt = 1;
4706 }
4707 else
4708 uExt = 0;
4709
4710 /*
4711 * Push any error code on to the new stack.
4712 */
4713 if (fFlags & IEM_XCPT_FLAGS_ERR)
4714 {
4715 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4716 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4717 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4718
4719 /* Check that there is sufficient space on the stack. */
4720 /** @todo Factor out segment limit checking for normal/expand down segments
4721 * into a separate function. */
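 /* Note: for a normal (grow-up) stack segment the writable range for the push
    is ESP in [cbStackFrame, limit + 1], whereas for an expand-down segment it
    is everything above the limit up to 0xffff or 0xffffffff (depending on the
    D/B bit). The two branches below check exactly that before pushing the
    2-byte (286 TSS) or 4-byte (386 TSS) error code. */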
4722 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4723 {
4724 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4725 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4726 {
4727 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4728 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4729 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4730 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4731 }
4732 }
4733 else
4734 {
4735 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4736 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4737 {
4738 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4739 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4740 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4741 }
4742 }
4743
4744
4745 if (fIsNewTSS386)
4746 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4747 else
4748 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4749 if (rcStrict != VINF_SUCCESS)
4750 {
4751 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4752 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4753 return rcStrict;
4754 }
4755 }
4756
4757 /* Check the new EIP against the new CS limit. */
4758 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4759 {
4760 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4761 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4762 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4763 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4764 }
4765
4766 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4767 pVCpu->cpum.GstCtx.ss.Sel));
4768 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4769}
4770
4771
4772/**
4773 * Implements exceptions and interrupts for protected mode.
4774 *
4775 * @returns VBox strict status code.
4776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4777 * @param cbInstr The number of bytes to offset rIP by in the return
4778 * address.
4779 * @param u8Vector The interrupt / exception vector number.
4780 * @param fFlags The flags.
4781 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4782 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4783 */
4784IEM_STATIC VBOXSTRICTRC
4785iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4786 uint8_t cbInstr,
4787 uint8_t u8Vector,
4788 uint32_t fFlags,
4789 uint16_t uErr,
4790 uint64_t uCr2)
4791{
4792 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4793
4794 /*
4795 * Read the IDT entry.
4796 */
4797 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4798 {
4799 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4800 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4801 }
4802 X86DESC Idte;
4803 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4804 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4805 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4806 {
4807 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4808 return rcStrict;
4809 }
4810 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4811 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4812 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4813
4814 /*
4815 * Check the descriptor type, DPL and such.
4816 * ASSUMES this is done in the same order as described for call-gate calls.
4817 */
4818 if (Idte.Gate.u1DescType)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4822 }
4823 bool fTaskGate = false;
4824 uint8_t f32BitGate = true;
4825 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
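 /* Note: fEflToClear is only applied to the guest EFLAGS after the old value
    has been pushed; interrupt gates additionally clear IF (see the switch below). */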
4826 switch (Idte.Gate.u4Type)
4827 {
4828 case X86_SEL_TYPE_SYS_UNDEFINED:
4829 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4830 case X86_SEL_TYPE_SYS_LDT:
4831 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4832 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4833 case X86_SEL_TYPE_SYS_UNDEFINED2:
4834 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4835 case X86_SEL_TYPE_SYS_UNDEFINED3:
4836 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4837 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4838 case X86_SEL_TYPE_SYS_UNDEFINED4:
4839 {
4840 /** @todo check what actually happens when the type is wrong...
4841 * esp. call gates. */
4842 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4843 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4844 }
4845
4846 case X86_SEL_TYPE_SYS_286_INT_GATE:
4847 f32BitGate = false;
4848 RT_FALL_THRU();
4849 case X86_SEL_TYPE_SYS_386_INT_GATE:
4850 fEflToClear |= X86_EFL_IF;
4851 break;
4852
4853 case X86_SEL_TYPE_SYS_TASK_GATE:
4854 fTaskGate = true;
4855#ifndef IEM_IMPLEMENTS_TASKSWITCH
4856 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4857#endif
4858 break;
4859
4860 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4861 f32BitGate = false;
 RT_FALL_THRU();
4862 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4863 break;
4864
4865 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4866 }
4867
4868 /* Check DPL against CPL if applicable. */
4869 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4870 {
4871 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4874 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4875 }
4876 }
4877
4878 /* Is it there? */
4879 if (!Idte.Gate.u1Present)
4880 {
4881 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4882 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4883 }
4884
4885 /* Is it a task-gate? */
4886 if (fTaskGate)
4887 {
4888 /*
4889 * Construct the error code masks based on what caused this task switch.
4890 * See Intel Instruction reference for INT.
4891 */
4892 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4893 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4894 RTSEL SelTSS = Idte.Gate.u16Sel;
4895
4896 /*
4897 * Fetch the TSS descriptor in the GDT.
4898 */
4899 IEMSELDESC DescTSS;
4900 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4901 if (rcStrict != VINF_SUCCESS)
4902 {
4903 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4904 VBOXSTRICTRC_VAL(rcStrict)));
4905 return rcStrict;
4906 }
4907
4908 /* The TSS descriptor must be a system segment and be available (not busy). */
4909 if ( DescTSS.Legacy.Gen.u1DescType
4910 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4911 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4912 {
4913 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4914 u8Vector, SelTSS, DescTSS.Legacy.au64));
4915 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4916 }
4917
4918 /* The TSS must be present. */
4919 if (!DescTSS.Legacy.Gen.u1Present)
4920 {
4921 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4922 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4923 }
4924
4925 /* Do the actual task switch. */
4926 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4927 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4928 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4929 }
4930
4931 /* A null CS is bad. */
4932 RTSEL NewCS = Idte.Gate.u16Sel;
4933 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4934 {
4935 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4936 return iemRaiseGeneralProtectionFault0(pVCpu);
4937 }
4938
4939 /* Fetch the descriptor for the new CS. */
4940 IEMSELDESC DescCS;
4941 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4942 if (rcStrict != VINF_SUCCESS)
4943 {
4944 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4945 return rcStrict;
4946 }
4947
4948 /* Must be a code segment. */
4949 if (!DescCS.Legacy.Gen.u1DescType)
4950 {
4951 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4952 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4953 }
4954 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4955 {
4956 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4957 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4958 }
4959
4960 /* Don't allow lowering the privilege level. */
4961 /** @todo Does the lowering of privileges apply to software interrupts
4962 * only? This has bearings on the more-privileged or
4963 * same-privilege stack behavior further down. A testcase would
4964 * be nice. */
4965 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4966 {
4967 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4968 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4969 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4970 }
4971
4972 /* Make sure the selector is present. */
4973 if (!DescCS.Legacy.Gen.u1Present)
4974 {
4975 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4976 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4977 }
4978
4979 /* Check the new EIP against the new CS limit. */
4980 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4981 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4982 ? Idte.Gate.u16OffsetLow
4983 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4984 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4985 if (uNewEip > cbLimitCS)
4986 {
4987 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4988 u8Vector, uNewEip, cbLimitCS, NewCS));
4989 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4990 }
4991 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4992
4993 /* Calc the flag image to push. */
4994 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4995 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4996 fEfl &= ~X86_EFL_RF;
4997 else
4998 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4999
5000 /* From V8086 mode only go to CPL 0. */
5001 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5002 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
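 /* A conforming CS keeps the current CPL; a non-conforming CS runs the handler
    at CS.DPL (which the check above guarantees is <= CPL). */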
5003 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5004 {
5005 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5006 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5007 }
5008
5009 /*
5010 * If the privilege level changes, we need to get a new stack from the TSS.
5011 * This in turn means validating the new SS and ESP...
5012 */
5013 if (uNewCpl != pVCpu->iem.s.uCpl)
5014 {
5015 RTSEL NewSS;
5016 uint32_t uNewEsp;
5017 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020
5021 IEMSELDESC DescSS;
5022 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5023 if (rcStrict != VINF_SUCCESS)
5024 return rcStrict;
5025 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5026 if (!DescSS.Legacy.Gen.u1DefBig)
5027 {
5028 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5029 uNewEsp = (uint16_t)uNewEsp;
5030 }
5031
5032 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5033
5034 /* Check that there is sufficient space for the stack frame. */
5035 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5036 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5037 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5038 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
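 /* Frame size: the gate width selects word vs. dword slots. Outside V86 mode
    the frame is EIP, CS, EFLAGS, old ESP, old SS plus an optional error code,
    i.e. 10/12 bytes for a 16-bit gate and 20/24 bytes for a 32-bit one; from
    V86 mode the old ES, DS, FS and GS are pushed as well (18/20 resp. 36/40
    bytes). */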
5039
5040 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5041 {
5042 if ( uNewEsp - 1 > cbLimitSS
5043 || uNewEsp < cbStackFrame)
5044 {
5045 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5046 u8Vector, NewSS, uNewEsp, cbStackFrame));
5047 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5048 }
5049 }
5050 else
5051 {
5052 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5053 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5054 {
5055 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5056 u8Vector, NewSS, uNewEsp, cbStackFrame));
5057 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5058 }
5059 }
5060
5061 /*
5062 * Start making changes.
5063 */
5064
5065 /* Set the new CPL so that stack accesses use it. */
5066 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5067 pVCpu->iem.s.uCpl = uNewCpl;
5068
5069 /* Create the stack frame. */
5070 RTPTRUNION uStackFrame;
5071 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5072 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5073 if (rcStrict != VINF_SUCCESS)
5074 return rcStrict;
5075 void * const pvStackFrame = uStackFrame.pv;
5076 if (f32BitGate)
5077 {
5078 if (fFlags & IEM_XCPT_FLAGS_ERR)
5079 *uStackFrame.pu32++ = uErr;
5080 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5081 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5082 uStackFrame.pu32[2] = fEfl;
5083 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5084 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5085 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5086 if (fEfl & X86_EFL_VM)
5087 {
5088 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5089 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5090 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5091 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5092 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5093 }
5094 }
5095 else
5096 {
5097 if (fFlags & IEM_XCPT_FLAGS_ERR)
5098 *uStackFrame.pu16++ = uErr;
5099 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5100 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5101 uStackFrame.pu16[2] = fEfl;
5102 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5103 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5104 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5105 if (fEfl & X86_EFL_VM)
5106 {
5107 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5108 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5109 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5110 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5111 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5112 }
5113 }
5114 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5115 if (rcStrict != VINF_SUCCESS)
5116 return rcStrict;
5117
5118 /* Mark the selectors 'accessed' (hope this is the correct time). */
5119 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5120 * after pushing the stack frame? (Write protect the gdt + stack to
5121 * find out.) */
5122 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5123 {
5124 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5125 if (rcStrict != VINF_SUCCESS)
5126 return rcStrict;
5127 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5128 }
5129
5130 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5131 {
5132 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5133 if (rcStrict != VINF_SUCCESS)
5134 return rcStrict;
5135 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5136 }
5137
5138 /*
5139 * Start committing the register changes (joins with the DPL=CPL branch).
5140 */
5141 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5142 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5143 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5144 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5145 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5146 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5147 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5148 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5149 * SP is loaded).
5150 * Need to check the other combinations too:
5151 * - 16-bit TSS, 32-bit handler
5152 * - 32-bit TSS, 16-bit handler */
5153 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5154 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5155 else
5156 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5157
5158 if (fEfl & X86_EFL_VM)
5159 {
5160 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5161 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5162 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5163 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5164 }
5165 }
5166 /*
5167 * Same privilege, no stack change and smaller stack frame.
5168 */
5169 else
5170 {
5171 uint64_t uNewRsp;
5172 RTPTRUNION uStackFrame;
5173 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
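 /* Same-privilege frame: only (E)IP, CS and (E)FLAGS plus an optional error
    code are pushed, i.e. 6/8 bytes for a 16-bit gate and 12/16 bytes for a
    32-bit one; SS:ESP is left as-is. */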
5174 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5175 if (rcStrict != VINF_SUCCESS)
5176 return rcStrict;
5177 void * const pvStackFrame = uStackFrame.pv;
5178
5179 if (f32BitGate)
5180 {
5181 if (fFlags & IEM_XCPT_FLAGS_ERR)
5182 *uStackFrame.pu32++ = uErr;
5183 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5184 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5185 uStackFrame.pu32[2] = fEfl;
5186 }
5187 else
5188 {
5189 if (fFlags & IEM_XCPT_FLAGS_ERR)
5190 *uStackFrame.pu16++ = uErr;
5191 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5192 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5193 uStackFrame.pu16[2] = fEfl;
5194 }
5195 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5196 if (rcStrict != VINF_SUCCESS)
5197 return rcStrict;
5198
5199 /* Mark the CS selector as 'accessed'. */
5200 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5201 {
5202 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5203 if (rcStrict != VINF_SUCCESS)
5204 return rcStrict;
5205 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5206 }
5207
5208 /*
5209 * Start committing the register changes (joins with the other branch).
5210 */
5211 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5212 }
5213
5214 /* ... register committing continues. */
5215 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5216 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5217 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5218 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5219 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5220 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5221
5222 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5223 fEfl &= ~fEflToClear;
5224 IEMMISC_SET_EFL(pVCpu, fEfl);
5225
5226 if (fFlags & IEM_XCPT_FLAGS_CR2)
5227 pVCpu->cpum.GstCtx.cr2 = uCr2;
5228
5229 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5230 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5231
5232 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5233}
5234
5235
5236/**
5237 * Implements exceptions and interrupts for long mode.
5238 *
5239 * @returns VBox strict status code.
5240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5241 * @param cbInstr The number of bytes to offset rIP by in the return
5242 * address.
5243 * @param u8Vector The interrupt / exception vector number.
5244 * @param fFlags The flags.
5245 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5246 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5247 */
5248IEM_STATIC VBOXSTRICTRC
5249iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5250 uint8_t cbInstr,
5251 uint8_t u8Vector,
5252 uint32_t fFlags,
5253 uint16_t uErr,
5254 uint64_t uCr2)
5255{
5256 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5257
5258 /*
5259 * Read the IDT entry.
5260 */
5261 uint16_t offIdt = (uint16_t)u8Vector << 4;
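 /* Long-mode IDT entries are 16 bytes wide, hence the vector << 4 above and
    the two 8-byte fetches below. */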
5262 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5263 {
5264 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5265 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5266 }
5267 X86DESC64 Idte;
5268 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5269 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5270 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5271 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5272 {
5273 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5274 return rcStrict;
5275 }
5276 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5277 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5278 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5279
5280 /*
5281 * Check the descriptor type, DPL and such.
5282 * ASSUMES this is done in the same order as described for call-gate calls.
5283 */
5284 if (Idte.Gate.u1DescType)
5285 {
5286 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5287 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5288 }
5289 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5290 switch (Idte.Gate.u4Type)
5291 {
5292 case AMD64_SEL_TYPE_SYS_INT_GATE:
5293 fEflToClear |= X86_EFL_IF;
5294 break;
5295 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5296 break;
5297
5298 default:
5299 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5300 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5301 }
5302
5303 /* Check DPL against CPL if applicable. */
5304 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5305 {
5306 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5307 {
5308 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5309 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5310 }
5311 }
5312
5313 /* Is it there? */
5314 if (!Idte.Gate.u1Present)
5315 {
5316 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5317 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5318 }
5319
5320 /* A null CS is bad. */
5321 RTSEL NewCS = Idte.Gate.u16Sel;
5322 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5323 {
5324 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5325 return iemRaiseGeneralProtectionFault0(pVCpu);
5326 }
5327
5328 /* Fetch the descriptor for the new CS. */
5329 IEMSELDESC DescCS;
5330 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5331 if (rcStrict != VINF_SUCCESS)
5332 {
5333 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5334 return rcStrict;
5335 }
5336
5337 /* Must be a 64-bit code segment. */
5338 if (!DescCS.Long.Gen.u1DescType)
5339 {
5340 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5341 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5342 }
5343 if ( !DescCS.Long.Gen.u1Long
5344 || DescCS.Long.Gen.u1DefBig
5345 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5346 {
5347 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5348 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5349 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5350 }
5351
5352 /* Don't allow lowering the privilege level. For non-conforming CS
5353 selectors, the CS.DPL sets the privilege level the trap/interrupt
5354 handler runs at. For conforming CS selectors, the CPL remains
5355 unchanged, but the CS.DPL must be <= CPL. */
5356 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5357 * when CPU in Ring-0. Result \#GP? */
5358 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5359 {
5360 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5361 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5362 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5363 }
5364
5365
5366 /* Make sure the selector is present. */
5367 if (!DescCS.Legacy.Gen.u1Present)
5368 {
5369 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5370 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5371 }
5372
5373 /* Check that the new RIP is canonical. */
5374 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5375 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5376 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5377 if (!IEM_IS_CANONICAL(uNewRip))
5378 {
5379 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5380 return iemRaiseGeneralProtectionFault0(pVCpu);
5381 }
5382
5383 /*
5384 * If the privilege level changes or if the IST isn't zero, we need to get
5385 * a new stack from the TSS.
5386 */
5387 uint64_t uNewRsp;
5388 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5389 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5390 if ( uNewCpl != pVCpu->iem.s.uCpl
5391 || Idte.Gate.u3IST != 0)
5392 {
5393 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5394 if (rcStrict != VINF_SUCCESS)
5395 return rcStrict;
5396 }
5397 else
5398 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5399 uNewRsp &= ~(uint64_t)0xf;
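 /* The stack pointer is aligned down to a 16-byte boundary before the frame
    is pushed, as specified for 64-bit interrupt/exception delivery. */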
5400
5401 /*
5402 * Calc the flag image to push.
5403 */
5404 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5405 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5406 fEfl &= ~X86_EFL_RF;
5407 else
5408 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5409
5410 /*
5411 * Start making changes.
5412 */
5413 /* Set the new CPL so that stack accesses use it. */
5414 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5415 pVCpu->iem.s.uCpl = uNewCpl;
5416
5417 /* Create the stack frame. */
5418 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
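 /* 64-bit frame, from the top of the frame down: SS, RSP, RFLAGS, CS, RIP and,
    at the lowest address, the optional error code (5 or 6 qwords in total). */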
5419 RTPTRUNION uStackFrame;
5420 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5421 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5422 if (rcStrict != VINF_SUCCESS)
5423 return rcStrict;
5424 void * const pvStackFrame = uStackFrame.pv;
5425
5426 if (fFlags & IEM_XCPT_FLAGS_ERR)
5427 *uStackFrame.pu64++ = uErr;
5428 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5429 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5430 uStackFrame.pu64[2] = fEfl;
5431 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5432 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5433 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5434 if (rcStrict != VINF_SUCCESS)
5435 return rcStrict;
5436
5437 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5438 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5439 * after pushing the stack frame? (Write protect the gdt + stack to
5440 * find out.) */
5441 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5442 {
5443 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5444 if (rcStrict != VINF_SUCCESS)
5445 return rcStrict;
5446 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5447 }
5448
5449 /*
5450 * Start committing the register changes.
5451 */
5452 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5453 * hidden registers when interrupting 32-bit or 16-bit code! */
5454 if (uNewCpl != uOldCpl)
5455 {
5456 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5457 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5458 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5459 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5460 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5461 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5462 }
5463 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5464 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5465 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5466 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5467 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5468 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5469 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5470 pVCpu->cpum.GstCtx.rip = uNewRip;
5471
5472 fEfl &= ~fEflToClear;
5473 IEMMISC_SET_EFL(pVCpu, fEfl);
5474
5475 if (fFlags & IEM_XCPT_FLAGS_CR2)
5476 pVCpu->cpum.GstCtx.cr2 = uCr2;
5477
5478 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5479 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5480
5481 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5482}
5483
5484
5485/**
5486 * Implements exceptions and interrupts.
5487 *
5488 * All exceptions and interrupts go through this function!
5489 *
5490 * @returns VBox strict status code.
5491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5492 * @param cbInstr The number of bytes to offset rIP by in the return
5493 * address.
5494 * @param u8Vector The interrupt / exception vector number.
5495 * @param fFlags The flags.
5496 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5497 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5498 */
5499DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5500iemRaiseXcptOrInt(PVMCPU pVCpu,
5501 uint8_t cbInstr,
5502 uint8_t u8Vector,
5503 uint32_t fFlags,
5504 uint16_t uErr,
5505 uint64_t uCr2)
5506{
5507 /*
5508 * Get all the state that we might need here.
5509 */
5510 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5511 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5512
5513#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5514 /*
5515 * Flush prefetch buffer
5516 */
5517 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5518#endif
5519
5520 /*
5521 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5522 */
5523 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5524 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5525 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5526 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5527 {
5528 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5529 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5530 u8Vector = X86_XCPT_GP;
5531 uErr = 0;
5532 }
5533#ifdef DBGFTRACE_ENABLED
5534 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5535 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5536 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5537#endif
5538
5539#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5540 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5541 {
5542 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5543 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5544 return rcStrict0;
5545 }
5546#endif
5547
5548#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5549 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5550 {
5551 /*
5552 * If the event is being injected as part of VMRUN, it isn't subject to event
5553 * intercepts in the nested-guest. However, secondary exceptions that occur
5554 * during injection of any event -are- subject to exception intercepts.
5555 *
5556 * See AMD spec. 15.20 "Event Injection".
5557 */
5558 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5559 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5560 else
5561 {
5562 /*
5563 * Check and handle if the event being raised is intercepted.
5564 */
5565 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5566 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5567 return rcStrict0;
5568 }
5569 }
5570#endif
5571
5572 /*
5573 * Do recursion accounting.
5574 */
5575 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5576 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5577 if (pVCpu->iem.s.cXcptRecursions == 0)
5578 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5579 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5580 else
5581 {
5582 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5583 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5584 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5585
5586 if (pVCpu->iem.s.cXcptRecursions >= 4)
5587 {
5588#ifdef DEBUG_bird
5589 AssertFailed();
5590#endif
5591 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5592 }
5593
5594 /*
5595 * Evaluate the sequence of recurring events.
5596 */
5597 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5598 NULL /* pXcptRaiseInfo */);
5599 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5600 { /* likely */ }
5601 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5602 {
5603 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5604 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5605 u8Vector = X86_XCPT_DF;
5606 uErr = 0;
5607#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5608 /* VMX nested-guest #DF intercept needs to be checked here. */
5609 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5610 {
5611 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5612 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5613 return rcStrict0;
5614 }
5615#endif
5616 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5617 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5618 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5619 }
5620 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5621 {
5622 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5623 return iemInitiateCpuShutdown(pVCpu);
5624 }
5625 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5626 {
5627 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5628 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5629 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5630 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5631 return VERR_EM_GUEST_CPU_HANG;
5632 }
5633 else
5634 {
5635 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5636 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5637 return VERR_IEM_IPE_9;
5638 }
5639
5640 /*
5641 * The 'EXT' bit is set when an exception occurs during delivery of an external
5642 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5643 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5644 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5645 *
5646 * [1] - Intel spec. 6.13 "Error Code"
5647 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5648 * [3] - Intel Instruction reference for INT n.
5649 */
5650 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5651 && (fFlags & IEM_XCPT_FLAGS_ERR)
5652 && u8Vector != X86_XCPT_PF
5653 && u8Vector != X86_XCPT_DF)
5654 {
5655 uErr |= X86_TRAP_ERR_EXTERNAL;
5656 }
5657 }
5658
5659 pVCpu->iem.s.cXcptRecursions++;
5660 pVCpu->iem.s.uCurXcpt = u8Vector;
5661 pVCpu->iem.s.fCurXcpt = fFlags;
5662 pVCpu->iem.s.uCurXcptErr = uErr;
5663 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5664
5665 /*
5666 * Extensive logging.
5667 */
5668#if defined(LOG_ENABLED) && defined(IN_RING3)
5669 if (LogIs3Enabled())
5670 {
5671 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5672 PVM pVM = pVCpu->CTX_SUFF(pVM);
5673 char szRegs[4096];
5674 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5675 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5676 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5677 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5678 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5679 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5680 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5681 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5682 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5683 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5684 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5685 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5686 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5687 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5688 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5689 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5690 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5691 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5692 " efer=%016VR{efer}\n"
5693 " pat=%016VR{pat}\n"
5694 " sf_mask=%016VR{sf_mask}\n"
5695 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5696 " lstar=%016VR{lstar}\n"
5697 " star=%016VR{star} cstar=%016VR{cstar}\n"
5698 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5699 );
5700
5701 char szInstr[256];
5702 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5703 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5704 szInstr, sizeof(szInstr), NULL);
5705 Log3(("%s%s\n", szRegs, szInstr));
5706 }
5707#endif /* LOG_ENABLED */
5708
5709 /*
5710 * Call the mode specific worker function.
5711 */
5712 VBOXSTRICTRC rcStrict;
5713 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5714 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5715 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5716 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5717 else
5718 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5719
5720 /* Flush the prefetch buffer. */
5721#ifdef IEM_WITH_CODE_TLB
5722 pVCpu->iem.s.pbInstrBuf = NULL;
5723#else
5724 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5725#endif
5726
5727 /*
5728 * Unwind.
5729 */
5730 pVCpu->iem.s.cXcptRecursions--;
5731 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5732 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5733 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5734 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5735 pVCpu->iem.s.cXcptRecursions + 1));
5736 return rcStrict;
5737}
5738
5739#ifdef IEM_WITH_SETJMP
5740/**
5741 * See iemRaiseXcptOrInt. Will not return.
5742 */
5743IEM_STATIC DECL_NO_RETURN(void)
5744iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5745 uint8_t cbInstr,
5746 uint8_t u8Vector,
5747 uint32_t fFlags,
5748 uint16_t uErr,
5749 uint64_t uCr2)
5750{
5751 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5752 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5753}
5754#endif
5755
5756
5757/** \#DE - 00. */
5758DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5759{
5760 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5761}
5762
5763
5764/** \#DB - 01.
5765 * @note This automatically clears DR7.GD. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5767{
5768 /** @todo set/clear RF. */
5769 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5770 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5771}
5772
5773
5774/** \#BR - 05. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5778}
5779
5780
5781/** \#UD - 06. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5785}
5786
5787
5788/** \#NM - 07. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5792}
5793
5794
5795/** \#TS(err) - 0a. */
5796DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5797{
5798 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5799}
5800
5801
5802/** \#TS(tr) - 0a. */
5803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5804{
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5806 pVCpu->cpum.GstCtx.tr.Sel, 0);
5807}
5808
5809
5810/** \#TS(0) - 0a. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5812{
5813 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5814 0, 0);
5815}
5816
5817
5818/** \#TS(sel) - 0a. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5820{
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5822 uSel & X86_SEL_MASK_OFF_RPL, 0);
5823}
5824
5825
5826/** \#NP(err) - 0b. */
5827DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5828{
5829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5830}
5831
5832
5833/** \#NP(sel) - 0b. */
5834DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5835{
5836 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5837 uSel & ~X86_SEL_RPL, 0);
5838}
5839
5840
5841/** \#SS(seg) - 0c. */
5842DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5843{
5844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5845 uSel & ~X86_SEL_RPL, 0);
5846}
5847
5848
5849/** \#SS(err) - 0c. */
5850DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5851{
5852 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5853}
5854
5855
5856/** \#GP(n) - 0d. */
5857DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5858{
5859 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5860}
5861
5862
5863/** \#GP(0) - 0d. */
5864DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5865{
5866 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5867}
5868
5869#ifdef IEM_WITH_SETJMP
5870/** \#GP(0) - 0d. */
5871DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5872{
5873 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5874}
5875#endif
5876
5877
5878/** \#GP(sel) - 0d. */
5879DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5880{
5881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5882 Sel & ~X86_SEL_RPL, 0);
5883}
5884
5885
5886/** \#GP(0) - 0d. */
5887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5888{
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5890}
5891
5892
5893/** \#GP(sel) - 0d. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5895{
5896 NOREF(iSegReg); NOREF(fAccess);
5897 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5898 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5899}
5900
5901#ifdef IEM_WITH_SETJMP
5902/** \#GP(sel) - 0d, longjmp. */
5903DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5904{
5905 NOREF(iSegReg); NOREF(fAccess);
5906 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5907 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5908}
5909#endif
5910
5911/** \#GP(sel) - 0d. */
5912DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5913{
5914 NOREF(Sel);
5915 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5916}
5917
5918#ifdef IEM_WITH_SETJMP
5919/** \#GP(sel) - 0d, longjmp. */
5920DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5921{
5922 NOREF(Sel);
5923 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5924}
5925#endif
5926
5927
5928/** \#GP(sel) - 0d. */
5929DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5930{
5931 NOREF(iSegReg); NOREF(fAccess);
5932 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5933}
5934
5935#ifdef IEM_WITH_SETJMP
5936/** \#GP(sel) - 0d, longjmp. */
5937DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5938 uint32_t fAccess)
5939{
5940 NOREF(iSegReg); NOREF(fAccess);
5941 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5942}
5943#endif
5944
5945
5946/** \#PF(n) - 0e. */
5947DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5948{
5949 uint16_t uErr;
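 /* Translate the paging status code and access type into #PF error code bits:
    bit 0 (P)   - protection violation vs. not-present,
    bit 1 (W/R) - write access,
    bit 2 (U/S) - CPL 3 access,
    bit 4 (I/D) - instruction fetch (only when PAE and NXE are enabled). */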
5950 switch (rc)
5951 {
5952 case VERR_PAGE_NOT_PRESENT:
5953 case VERR_PAGE_TABLE_NOT_PRESENT:
5954 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5955 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5956 uErr = 0;
5957 break;
5958
5959 default:
5960 AssertMsgFailed(("%Rrc\n", rc));
5961 RT_FALL_THRU();
5962 case VERR_ACCESS_DENIED:
5963 uErr = X86_TRAP_PF_P;
5964 break;
5965
5966 /** @todo reserved */
5967 }
5968
5969 if (pVCpu->iem.s.uCpl == 3)
5970 uErr |= X86_TRAP_PF_US;
5971
5972 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5973 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5974 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5975 uErr |= X86_TRAP_PF_ID;
5976
5977#if 0 /* This is so much nonsense, really. Why was it done like that? */
5978 /* Note! RW access callers reporting a WRITE protection fault, will clear
5979 the READ flag before calling. So, read-modify-write accesses (RW)
5980 can safely be reported as READ faults. */
5981 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5982 uErr |= X86_TRAP_PF_RW;
5983#else
5984 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5985 {
5986 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5987 uErr |= X86_TRAP_PF_RW;
5988 }
5989#endif
5990
5991 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5992 uErr, GCPtrWhere);
5993}
5994
5995#ifdef IEM_WITH_SETJMP
5996/** \#PF(n) - 0e, longjmp. */
5997IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5998{
5999 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6000}
6001#endif
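/* Illustrative sketch (not part of the build): how the error code bits above
   combine for a common case, namely a ring-3 data write to a not-present page.
   The helper name and the IEM_ACCESS_WHAT_DATA qualifier are assumptions for
   the example; only the bit arithmetic is taken from iemRaisePageFault. */
#if 0
static void iemPageFaultErrCodeExample(PVMCPU pVCpu, RTGCPTR GCPtrWhere)
{
    /* VERR_PAGE_NOT_PRESENT              -> P bit stays clear.              */
    /* uCpl == 3                          -> X86_TRAP_PF_US is ORed in.      */
    /* write access without the read flag -> X86_TRAP_PF_RW is ORed in.      */
    /* Expected error code: X86_TRAP_PF_US | X86_TRAP_PF_RW (0x06).          */
    VBOXSTRICTRC rcStrict = iemRaisePageFault(pVCpu, GCPtrWhere,
                                              IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA,
                                              VERR_PAGE_NOT_PRESENT);
    NOREF(rcStrict);
}
#endif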
6002
6003
6004/** \#MF(0) - 10. */
6005DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
6006{
6007 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6008}
6009
6010
6011/** \#AC(0) - 11. */
6012DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6013{
6014 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6015}
6016
6017
6018/**
6019 * Macro for calling iemCImplRaiseDivideError().
6020 *
6021 * This enables us to add/remove arguments and force different levels of
6022 * inlining as we wish.
6023 *
6024 * @return Strict VBox status code.
6025 */
6026#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6027IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6028{
6029 NOREF(cbInstr);
6030 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6031}
6032
6033
6034/**
6035 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6036 *
6037 * This enables us to add/remove arguments and force different levels of
6038 * inlining as we wish.
6039 *
6040 * @return Strict VBox status code.
6041 */
6042#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6043IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6044{
6045 NOREF(cbInstr);
6046 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6047}
6048
6049
6050/**
6051 * Macro for calling iemCImplRaiseInvalidOpcode().
6052 *
6053 * This enables us to add/remove arguments and force different levels of
6054 * inlining as we wish.
6055 *
6056 * @return Strict VBox status code.
6057 */
6058#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6059IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6060{
6061 NOREF(cbInstr);
6062 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6063}
6064
6065
6066/** @} */
6067
6068
6069/*
6070 *
6071 * Helper routines.
6072 * Helper routines.
6073 * Helper routines.
6074 *
6075 */
6076
6077/**
6078 * Recalculates the effective operand size.
6079 *
6080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6081 */
6082IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6083{
6084 switch (pVCpu->iem.s.enmCpuMode)
6085 {
6086 case IEMMODE_16BIT:
6087 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6088 break;
6089 case IEMMODE_32BIT:
6090 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6091 break;
6092 case IEMMODE_64BIT:
6093 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6094 {
6095 case 0:
6096 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6097 break;
6098 case IEM_OP_PRF_SIZE_OP:
6099 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6100 break;
6101 case IEM_OP_PRF_SIZE_REX_W:
6102 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6103 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6104 break;
6105 }
6106 break;
6107 default:
6108 AssertFailed();
6109 }
6110}
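/* Illustrative sketch (not part of the build): the effective operand size the
   switch above yields in 64-bit mode for each prefix combination; the helper
   name is made up, the mapping mirrors iemRecalEffOpSize. */
#if 0
static IEMMODE iemEffOpSize64Example(uint32_t fPrefixes, IEMMODE enmDefOpSize)
{
    switch (fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
    {
        case 0:                  return enmDefOpSize;  /* usually 32-bit */
        case IEM_OP_PRF_SIZE_OP: return IEMMODE_16BIT; /* 0x66 alone     */
        default:                 return IEMMODE_64BIT; /* REX.W wins     */
    }
}
#endif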
6111
6112
6113/**
6114 * Sets the default operand size to 64-bit and recalculates the effective
6115 * operand size.
6116 *
6117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6118 */
6119IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6120{
6121 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6122 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6123 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6124 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6125 else
6126 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6127}
6128
6129
6130/*
6131 *
6132 * Common opcode decoders.
6133 * Common opcode decoders.
6134 * Common opcode decoders.
6135 *
6136 */
6137//#include <iprt/mem.h>
6138
6139/**
6140 * Used to add extra details about a stub case.
6141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6142 */
6143IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6144{
6145#if defined(LOG_ENABLED) && defined(IN_RING3)
6146 PVM pVM = pVCpu->CTX_SUFF(pVM);
6147 char szRegs[4096];
6148 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6149 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6150 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6151 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6152 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6153 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6154 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6155 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6156 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6157 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6158 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6159 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6160 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6161 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6162 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6163 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6164 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6165 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6166 " efer=%016VR{efer}\n"
6167 " pat=%016VR{pat}\n"
6168 " sf_mask=%016VR{sf_mask}\n"
6169 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6170 " lstar=%016VR{lstar}\n"
6171 " star=%016VR{star} cstar=%016VR{cstar}\n"
6172 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6173 );
6174
6175 char szInstr[256];
6176 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6177 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6178 szInstr, sizeof(szInstr), NULL);
6179
6180 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6181#else
6182 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6183#endif
6184}
6185
6186/**
6187 * Complains about a stub.
6188 *
6189 * Providing two versions of this macro, one for daily use and one for use when
6190 * working on IEM.
6191 */
6192#if 0
6193# define IEMOP_BITCH_ABOUT_STUB() \
6194 do { \
6195 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6196 iemOpStubMsg2(pVCpu); \
6197 RTAssertPanic(); \
6198 } while (0)
6199#else
6200# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6201#endif
6202
6203/** Stubs an opcode. */
6204#define FNIEMOP_STUB(a_Name) \
6205 FNIEMOP_DEF(a_Name) \
6206 { \
6207 RT_NOREF_PV(pVCpu); \
6208 IEMOP_BITCH_ABOUT_STUB(); \
6209 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6210 } \
6211 typedef int ignore_semicolon
6212
6213/** Stubs an opcode. */
6214#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6215 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6216 { \
6217 RT_NOREF_PV(pVCpu); \
6218 RT_NOREF_PV(a_Name0); \
6219 IEMOP_BITCH_ABOUT_STUB(); \
6220 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6221 } \
6222 typedef int ignore_semicolon
6223
6224/** Stubs an opcode which currently should raise \#UD. */
6225#define FNIEMOP_UD_STUB(a_Name) \
6226 FNIEMOP_DEF(a_Name) \
6227 { \
6228 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6229 return IEMOP_RAISE_INVALID_OPCODE(); \
6230 } \
6231 typedef int ignore_semicolon
6232
6233/** Stubs an opcode which currently should raise \#UD. */
6234#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6235 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6236 { \
6237 RT_NOREF_PV(pVCpu); \
6238 RT_NOREF_PV(a_Name0); \
6239 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6240 return IEMOP_RAISE_INVALID_OPCODE(); \
6241 } \
6242 typedef int ignore_semicolon
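/* Illustrative sketch (not part of the build): how an opcode decoder is
   typically stubbed with the macros above; the mnemonic and opcode bytes in
   the comment are made up for the example. */
#if 0
/** Opcode 0x0f 0xff - made-up example. */
FNIEMOP_STUB(iemOp_madeUpExample);
#endif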
6243
6244
6245
6246/** @name Register Access.
6247 * @{
6248 */
6249
6250/**
6251 * Gets a reference (pointer) to the specified hidden segment register.
6252 *
6253 * @returns Hidden register reference.
6254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6255 * @param iSegReg The segment register.
6256 */
6257IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6258{
6259 Assert(iSegReg < X86_SREG_COUNT);
6260 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6261 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6262
6263#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6264 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6265 { /* likely */ }
6266 else
6267 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6268#else
6269 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6270#endif
6271 return pSReg;
6272}
6273
6274
6275/**
6276 * Ensures that the given hidden segment register is up to date.
6277 *
6278 * @returns Hidden register reference.
6279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6280 * @param pSReg The segment register.
6281 */
6282IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6283{
6284#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6285 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6286 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6287#else
6288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6289 NOREF(pVCpu);
6290#endif
6291 return pSReg;
6292}
6293
6294
6295/**
6296 * Gets a reference (pointer) to the specified segment register (the selector
6297 * value).
6298 *
6299 * @returns Pointer to the selector variable.
6300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6301 * @param iSegReg The segment register.
6302 */
6303DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6304{
6305 Assert(iSegReg < X86_SREG_COUNT);
6306 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6307 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6308}
6309
6310
6311/**
6312 * Fetches the selector value of a segment register.
6313 *
6314 * @returns The selector value.
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 * @param iSegReg The segment register.
6317 */
6318DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6319{
6320 Assert(iSegReg < X86_SREG_COUNT);
6321 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6322 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6323}
6324
6325
6326/**
6327 * Fetches the base address value of a segment register.
6328 *
6329 * @returns The base address value.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iSegReg The segment register.
6332 */
6333DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6334{
6335 Assert(iSegReg < X86_SREG_COUNT);
6336 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6337 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6338}
6339
6340
6341/**
6342 * Gets a reference (pointer) to the specified general purpose register.
6343 *
6344 * @returns Register reference.
6345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6346 * @param iReg The general purpose register.
6347 */
6348DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6349{
6350 Assert(iReg < 16);
6351 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6352}
6353
6354
6355/**
6356 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6357 *
6358 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6359 *
6360 * @returns Register reference.
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param iReg The register.
6363 */
6364DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6365{
6366 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6367 {
6368 Assert(iReg < 16);
6369 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6370 }
6371 /* high 8-bit register. */
6372 Assert(iReg < 8);
6373 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6374}
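/* Illustrative sketch (not part of the build): with no REX prefix, encodings
   4-7 select the legacy high-byte registers, so encoding 4 resolves to AH
   (aGRegs[0].bHi); with any REX prefix the same encoding means SPL. The
   helper name is made up. */
#if 0
static void iemGRegRefU8Example(PVMCPU pVCpu)
{
    /* Assuming no REX prefix has been seen yet: encoding 4 is AH. */
    uint8_t *pbAh = iemGRegRefU8(pVCpu, 4);   /* == &aGRegs[X86_GREG_xAX].bHi */
    /* With any REX prefix the same encoding selects SPL instead. */
    pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_REX;
    uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);  /* == &aGRegs[X86_GREG_xSP].u8 */
    NOREF(pbAh); NOREF(pbSpl);
}
#endif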
6375
6376
6377/**
6378 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6379 *
6380 * @returns Register reference.
6381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6382 * @param iReg The register.
6383 */
6384DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6385{
6386 Assert(iReg < 16);
6387 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6388}
6389
6390
6391/**
6392 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6393 *
6394 * @returns Register reference.
6395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6396 * @param iReg The register.
6397 */
6398DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6399{
6400 Assert(iReg < 16);
6401 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6402}
6403
6404
6405/**
6406 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6407 *
6408 * @returns Register reference.
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param iReg The register.
6411 */
6412DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6413{
6414 Assert(iReg < 16);
6415 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6416}
6417
6418
6419/**
6420 * Gets a reference (pointer) to the specified segment register's base address.
6421 *
6422 * @returns Segment register base address reference.
6423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6424 * @param iSegReg The segment selector.
6425 */
6426DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6427{
6428 Assert(iSegReg < X86_SREG_COUNT);
6429 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6430 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6431}
6432
6433
6434/**
6435 * Fetches the value of an 8-bit general purpose register.
6436 *
6437 * @returns The register value.
6438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6439 * @param iReg The register.
6440 */
6441DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6442{
6443 return *iemGRegRefU8(pVCpu, iReg);
6444}
6445
6446
6447/**
6448 * Fetches the value of a 16-bit general purpose register.
6449 *
6450 * @returns The register value.
6451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6452 * @param iReg The register.
6453 */
6454DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6455{
6456 Assert(iReg < 16);
6457 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6458}
6459
6460
6461/**
6462 * Fetches the value of a 32-bit general purpose register.
6463 *
6464 * @returns The register value.
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 * @param iReg The register.
6467 */
6468DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6469{
6470 Assert(iReg < 16);
6471 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6472}
6473
6474
6475/**
6476 * Fetches the value of a 64-bit general purpose register.
6477 *
6478 * @returns The register value.
6479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6480 * @param iReg The register.
6481 */
6482DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6483{
6484 Assert(iReg < 16);
6485 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6486}
6487
6488
6489/**
6490 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6491 *
6492 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6493 * segment limit.
6494 *
6495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6496 * @param offNextInstr The offset of the next instruction.
6497 */
6498IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6499{
6500 switch (pVCpu->iem.s.enmEffOpSize)
6501 {
6502 case IEMMODE_16BIT:
6503 {
6504 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6505 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6506 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6507 return iemRaiseGeneralProtectionFault0(pVCpu);
6508 pVCpu->cpum.GstCtx.rip = uNewIp;
6509 break;
6510 }
6511
6512 case IEMMODE_32BIT:
6513 {
6514 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6515 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6516
6517 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6518 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6519 return iemRaiseGeneralProtectionFault0(pVCpu);
6520 pVCpu->cpum.GstCtx.rip = uNewEip;
6521 break;
6522 }
6523
6524 case IEMMODE_64BIT:
6525 {
6526 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6527
6528 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6529 if (!IEM_IS_CANONICAL(uNewRip))
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pVCpu->cpum.GstCtx.rip = uNewRip;
6532 break;
6533 }
6534
6535 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6536 }
6537
6538 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6539
6540#ifndef IEM_WITH_CODE_TLB
6541 /* Flush the prefetch buffer. */
6542 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6543#endif
6544
6545 return VINF_SUCCESS;
6546}
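/* Worked example (informational): a 2-byte "jmp short -2" at IP 0x1000 in
   16-bit code yields uNewIp = 0x1000 + (-2) + 2 = 0x1000, i.e. the jump
   targets itself, and is accepted as long as 0x1000 is within the CS limit. */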
6547
6548
6549/**
6550 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6551 *
6552 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6553 * segment limit.
6554 *
6555 * @returns Strict VBox status code.
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param offNextInstr The offset of the next instruction.
6558 */
6559IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6560{
6561 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6562
6563 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6564 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6565 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6566 return iemRaiseGeneralProtectionFault0(pVCpu);
6567 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6568 pVCpu->cpum.GstCtx.rip = uNewIp;
6569 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6570
6571#ifndef IEM_WITH_CODE_TLB
6572 /* Flush the prefetch buffer. */
6573 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6574#endif
6575
6576 return VINF_SUCCESS;
6577}
6578
6579
6580/**
6581 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6582 *
6583 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6584 * segment limit.
6585 *
6586 * @returns Strict VBox status code.
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 * @param offNextInstr The offset of the next instruction.
6589 */
6590IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6591{
6592 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6593
6594 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6595 {
6596 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6597
6598 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6599 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6600 return iemRaiseGeneralProtectionFault0(pVCpu);
6601 pVCpu->cpum.GstCtx.rip = uNewEip;
6602 }
6603 else
6604 {
6605 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6606
6607 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6608 if (!IEM_IS_CANONICAL(uNewRip))
6609 return iemRaiseGeneralProtectionFault0(pVCpu);
6610 pVCpu->cpum.GstCtx.rip = uNewRip;
6611 }
6612 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6613
6614#ifndef IEM_WITH_CODE_TLB
6615 /* Flush the prefetch buffer. */
6616 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6617#endif
6618
6619 return VINF_SUCCESS;
6620}
6621
6622
6623/**
6624 * Performs a near jump to the specified address.
6625 *
6626 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6627 * segment limit.
6628 *
6629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6630 * @param uNewRip The new RIP value.
6631 */
6632IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6633{
6634 switch (pVCpu->iem.s.enmEffOpSize)
6635 {
6636 case IEMMODE_16BIT:
6637 {
6638 Assert(uNewRip <= UINT16_MAX);
6639 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6640 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6641 return iemRaiseGeneralProtectionFault0(pVCpu);
6642 /** @todo Test 16-bit jump in 64-bit mode. */
6643 pVCpu->cpum.GstCtx.rip = uNewRip;
6644 break;
6645 }
6646
6647 case IEMMODE_32BIT:
6648 {
6649 Assert(uNewRip <= UINT32_MAX);
6650 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6651 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6652
6653 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6654 return iemRaiseGeneralProtectionFault0(pVCpu);
6655 pVCpu->cpum.GstCtx.rip = uNewRip;
6656 break;
6657 }
6658
6659 case IEMMODE_64BIT:
6660 {
6661 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6662
6663 if (!IEM_IS_CANONICAL(uNewRip))
6664 return iemRaiseGeneralProtectionFault0(pVCpu);
6665 pVCpu->cpum.GstCtx.rip = uNewRip;
6666 break;
6667 }
6668
6669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6670 }
6671
6672 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6673
6674#ifndef IEM_WITH_CODE_TLB
6675 /* Flush the prefetch buffer. */
6676 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6677#endif
6678
6679 return VINF_SUCCESS;
6680}
6681
6682
6683/**
6684 * Get the address of the top of the stack.
6685 *
6686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6687 */
6688DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6689{
6690 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6691 return pVCpu->cpum.GstCtx.rsp;
6692 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6693 return pVCpu->cpum.GstCtx.esp;
6694 return pVCpu->cpum.GstCtx.sp;
6695}
6696
6697
6698/**
6699 * Updates the RIP/EIP/IP to point to the next instruction.
6700 *
6701 * This function leaves the EFLAGS.RF flag alone.
6702 *
6703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6704 * @param cbInstr The number of bytes to add.
6705 */
6706IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6707{
6708 switch (pVCpu->iem.s.enmCpuMode)
6709 {
6710 case IEMMODE_16BIT:
6711 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6712 pVCpu->cpum.GstCtx.eip += cbInstr;
6713 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6714 break;
6715
6716 case IEMMODE_32BIT:
6717 pVCpu->cpum.GstCtx.eip += cbInstr;
6718 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6719 break;
6720
6721 case IEMMODE_64BIT:
6722 pVCpu->cpum.GstCtx.rip += cbInstr;
6723 break;
6724 default: AssertFailed();
6725 }
6726}
6727
6728
6729#if 0
6730/**
6731 * Updates the RIP/EIP/IP to point to the next instruction.
6732 *
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 */
6735IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6736{
6737 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6738}
6739#endif
6740
6741
6742
6743/**
6744 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6745 *
6746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6747 * @param cbInstr The number of bytes to add.
6748 */
6749IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6750{
6751 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6752
6753 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6754#if ARCH_BITS >= 64
6755 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6756 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6757 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6758#else
6759 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6760 pVCpu->cpum.GstCtx.rip += cbInstr;
6761 else
6762 pVCpu->cpum.GstCtx.eip += cbInstr;
6763#endif
6764}
6765
6766
6767/**
6768 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6769 *
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 */
6772IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6773{
6774 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6775}
6776
6777
6778/**
6779 * Adds to the stack pointer.
6780 *
6781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6782 * @param cbToAdd The number of bytes to add (8-bit!).
6783 */
6784DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6785{
6786 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6787 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6788 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6789 pVCpu->cpum.GstCtx.esp += cbToAdd;
6790 else
6791 pVCpu->cpum.GstCtx.sp += cbToAdd;
6792}
6793
6794
6795/**
6796 * Subtracts from the stack pointer.
6797 *
6798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6799 * @param cbToSub The number of bytes to subtract (8-bit!).
6800 */
6801DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6802{
6803 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6804 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6805 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6806 pVCpu->cpum.GstCtx.esp -= cbToSub;
6807 else
6808 pVCpu->cpum.GstCtx.sp -= cbToSub;
6809}
6810
6811
6812/**
6813 * Adds to the temporary stack pointer.
6814 *
6815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6816 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6817 * @param cbToAdd The number of bytes to add (16-bit).
6818 */
6819DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6820{
6821 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6822 pTmpRsp->u += cbToAdd;
6823 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6824 pTmpRsp->DWords.dw0 += cbToAdd;
6825 else
6826 pTmpRsp->Words.w0 += cbToAdd;
6827}
6828
6829
6830/**
6831 * Subtracts from the temporary stack pointer.
6832 *
6833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6834 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6835 * @param cbToSub The number of bytes to subtract.
6836 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6837 * expecting that.
6838 */
6839DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6840{
6841 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6842 pTmpRsp->u -= cbToSub;
6843 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6844 pTmpRsp->DWords.dw0 -= cbToSub;
6845 else
6846 pTmpRsp->Words.w0 -= cbToSub;
6847}
6848
6849
6850/**
6851 * Calculates the effective stack address for a push of the specified size as
6852 * well as the new RSP value (upper bits may be masked).
6853 *
6854 * @returns Effective stack address for the push.
6855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6856 * @param cbItem The size of the stack item to push.
6857 * @param puNewRsp Where to return the new RSP value.
6858 */
6859DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6860{
6861 RTUINT64U uTmpRsp;
6862 RTGCPTR GCPtrTop;
6863 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6864
6865 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6866 GCPtrTop = uTmpRsp.u -= cbItem;
6867 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6868 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6869 else
6870 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6871 *puNewRsp = uTmpRsp.u;
6872 return GCPtrTop;
6873}
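/* Illustrative sketch (not part of the build): what the helper above returns
   for an 8-byte push; the function name is made up. With a 16-bit SS only the
   low word wraps, which is why the intermediate RTUINT64U is needed. */
#if 0
static void iemRegGetRspForPushExample(PCVMCPU pVCpu)
{
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
    /* 64-bit mode:       GCPtrTop == rsp - 8 and uNewRsp == rsp - 8.        */
    /* 16-bit stack (SS): GCPtrTop == (uint16_t)(sp - 8); the upper bits of  */
    /*                    rsp are carried over unchanged into uNewRsp.       */
    NOREF(GCPtrTop); NOREF(uNewRsp);
}
#endif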
6874
6875
6876/**
6877 * Gets the current stack pointer and calculates the value after a pop of the
6878 * specified size.
6879 *
6880 * @returns Current stack pointer.
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 * @param cbItem The size of the stack item to pop.
6883 * @param puNewRsp Where to return the new RSP value.
6884 */
6885DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6886{
6887 RTUINT64U uTmpRsp;
6888 RTGCPTR GCPtrTop;
6889 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6890
6891 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6892 {
6893 GCPtrTop = uTmpRsp.u;
6894 uTmpRsp.u += cbItem;
6895 }
6896 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6897 {
6898 GCPtrTop = uTmpRsp.DWords.dw0;
6899 uTmpRsp.DWords.dw0 += cbItem;
6900 }
6901 else
6902 {
6903 GCPtrTop = uTmpRsp.Words.w0;
6904 uTmpRsp.Words.w0 += cbItem;
6905 }
6906 *puNewRsp = uTmpRsp.u;
6907 return GCPtrTop;
6908}
6909
6910
6911/**
6912 * Calculates the effective stack address for a push of the specified size as
6913 * well as the new temporary RSP value (upper bits may be masked).
6914 *
6915 * @returns Effective stack address for the push.
6916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6917 * @param pTmpRsp The temporary stack pointer. This is updated.
6918 * @param cbItem The size of the stack item to push.
6919 */
6920DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6921{
6922 RTGCPTR GCPtrTop;
6923
6924 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6925 GCPtrTop = pTmpRsp->u -= cbItem;
6926 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6927 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6928 else
6929 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6930 return GCPtrTop;
6931}
6932
6933
6934/**
6935 * Gets the effective stack address for a pop of the specified size and
6936 * calculates and updates the temporary RSP.
6937 *
6938 * @returns Current stack pointer.
6939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6940 * @param pTmpRsp The temporary stack pointer. This is updated.
6941 * @param cbItem The size of the stack item to pop.
6942 */
6943DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6944{
6945 RTGCPTR GCPtrTop;
6946 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6947 {
6948 GCPtrTop = pTmpRsp->u;
6949 pTmpRsp->u += cbItem;
6950 }
6951 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6952 {
6953 GCPtrTop = pTmpRsp->DWords.dw0;
6954 pTmpRsp->DWords.dw0 += cbItem;
6955 }
6956 else
6957 {
6958 GCPtrTop = pTmpRsp->Words.w0;
6959 pTmpRsp->Words.w0 += cbItem;
6960 }
6961 return GCPtrTop;
6962}
6963
6964/** @} */
6965
6966
6967/** @name FPU access and helpers.
6968 *
6969 * @{
6970 */
6971
6972
6973/**
6974 * Hook for preparing to use the host FPU.
6975 *
6976 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6977 *
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 */
6980DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6981{
6982#ifdef IN_RING3
6983 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6984#else
6985 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6986#endif
6987 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6988}
6989
6990
6991/**
6992 * Hook for preparing to use the host FPU for SSE.
6993 *
6994 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6995 *
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 */
6998DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6999{
7000 iemFpuPrepareUsage(pVCpu);
7001}
7002
7003
7004/**
7005 * Hook for preparing to use the host FPU for AVX.
7006 *
7007 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7008 *
7009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7010 */
7011DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7012{
7013 iemFpuPrepareUsage(pVCpu);
7014}
7015
7016
7017/**
7018 * Hook for actualizing the guest FPU state before the interpreter reads it.
7019 *
7020 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7021 *
7022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7023 */
7024DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7025{
7026#ifdef IN_RING3
7027 NOREF(pVCpu);
7028#else
7029 CPUMRZFpuStateActualizeForRead(pVCpu);
7030#endif
7031 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7032}
7033
7034
7035/**
7036 * Hook for actualizing the guest FPU state before the interpreter changes it.
7037 *
7038 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7039 *
7040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7041 */
7042DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7043{
7044#ifdef IN_RING3
7045 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7046#else
7047 CPUMRZFpuStateActualizeForChange(pVCpu);
7048#endif
7049 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7050}
7051
7052
7053/**
7054 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7055 * only.
7056 *
7057 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7058 *
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 */
7061DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7062{
7063#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7064 NOREF(pVCpu);
7065#else
7066 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7067#endif
7068 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7069}
7070
7071
7072/**
7073 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7074 * read+write.
7075 *
7076 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7077 *
7078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7079 */
7080DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7081{
7082#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7083 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7084#else
7085 CPUMRZFpuStateActualizeForChange(pVCpu);
7086#endif
7087 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7088}
7089
7090
7091/**
7092 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7093 * only.
7094 *
7095 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7096 *
7097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7098 */
7099DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7100{
7101#ifdef IN_RING3
7102 NOREF(pVCpu);
7103#else
7104 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7105#endif
7106 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7107}
7108
7109
7110/**
7111 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7112 * read+write.
7113 *
7114 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7115 *
7116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7117 */
7118DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7119{
7120#ifdef IN_RING3
7121 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7122#else
7123 CPUMRZFpuStateActualizeForChange(pVCpu);
7124#endif
7125 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7126}
7127
7128
7129/**
7130 * Stores a QNaN value into a FPU register.
7131 *
7132 * @param pReg Pointer to the register.
7133 */
7134DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7135{
7136 pReg->au32[0] = UINT32_C(0x00000000); /* fraction bits 0..31: zero. */
7137 pReg->au32[1] = UINT32_C(0xc0000000); /* fraction bits 32..63: integer bit + quiet bit set. */
7138 pReg->au16[4] = UINT16_C(0xffff); /* sign=1, exponent=0x7fff => the QNaN "real indefinite". */
7139}
7140
7141
7142/**
7143 * Updates the FOP, FPU.CS and FPUIP registers.
7144 *
7145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7146 * @param pFpuCtx The FPU context.
7147 */
7148DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7149{
7150 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7151 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7152 /** @todo x87.CS and FPUIP need to be kept separately. */
7153 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7154 {
7155 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
7156 * are handled in real mode, based on the fnsave and fnstenv images. */
7157 pFpuCtx->CS = 0;
7158 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7159 }
7160 else
7161 {
7162 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7163 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7164 }
7165}
7166
7167
7168/**
7169 * Updates the x87.DS and FPUDP registers.
7170 *
7171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7172 * @param pFpuCtx The FPU context.
7173 * @param iEffSeg The effective segment register.
7174 * @param GCPtrEff The effective address relative to @a iEffSeg.
7175 */
7176DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7177{
7178 RTSEL sel;
7179 switch (iEffSeg)
7180 {
7181 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7182 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7183 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7184 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7185 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7186 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7187 default:
7188 AssertMsgFailed(("%d\n", iEffSeg));
7189 sel = pVCpu->cpum.GstCtx.ds.Sel;
7190 }
7191 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7192 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7193 {
7194 pFpuCtx->DS = 0;
7195 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7196 }
7197 else
7198 {
7199 pFpuCtx->DS = sel;
7200 pFpuCtx->FPUDP = GCPtrEff;
7201 }
7202}
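/* Worked example (informational): in real mode with DS=0x1234 and an effective
   address of 0x0010, the code above stores FPUDP = 0x12340 + 0x10 = 0x12350
   and DS = 0; in protected mode it stores the selector and offset unchanged. */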
7203
7204
7205/**
7206 * Rotates the stack registers in the push direction.
7207 *
7208 * @param pFpuCtx The FPU context.
7209 * @remarks This is a complete waste of time, but fxsave stores the registers in
7210 * stack order.
7211 */
7212DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7213{
7214 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7215 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7216 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7217 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7218 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7219 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7220 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7221 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7222 pFpuCtx->aRegs[0].r80 = r80Tmp;
7223}
7224
7225
7226/**
7227 * Rotates the stack registers in the pop direction.
7228 *
7229 * @param pFpuCtx The FPU context.
7230 * @remarks This is a complete waste of time, but fxsave stores the registers in
7231 * stack order.
7232 */
7233DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7234{
7235 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7236 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7237 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7238 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7239 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7240 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7241 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7242 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7243 pFpuCtx->aRegs[7].r80 = r80Tmp;
7244}
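/* Illustrative sketch (not part of the build): because of the rotations above,
   aRegs[i] always holds ST(i), while FSW.TOP and FTW keep using absolute x87
   register numbers. The helper name is made up; the conversion matches what
   the FTW updates in the store/free workers below do. */
#if 0
static uint8_t iemFpuStRegToAbsRegExample(PX86FXSTATE pFpuCtx, uint8_t iStReg)
{
    return (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif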
7245
7246
7247/**
7248 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7249 * exception prevents it.
7250 *
7251 * @param pResult The FPU operation result to push.
7252 * @param pFpuCtx The FPU context.
7253 */
7254IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7255{
7256 /* Update FSW and bail if there are pending exceptions afterwards. */
7257 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7258 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7259 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7260 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7261 {
7262 pFpuCtx->FSW = fFsw;
7263 return;
7264 }
7265
7266 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK; /* TOP - 1, i.e. +7 modulo 8. */
7267 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7268 {
7269 /* All is fine, push the actual value. */
7270 pFpuCtx->FTW |= RT_BIT(iNewTop);
7271 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7272 }
7273 else if (pFpuCtx->FCW & X86_FCW_IM)
7274 {
7275 /* Masked stack overflow, push QNaN. */
7276 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7277 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7278 }
7279 else
7280 {
7281 /* Raise stack overflow, don't push anything. */
7282 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7283 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7284 return;
7285 }
7286
7287 fFsw &= ~X86_FSW_TOP_MASK;
7288 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7289 pFpuCtx->FSW = fFsw;
7290
7291 iemFpuRotateStackPush(pFpuCtx);
7292}
7293
7294
7295/**
7296 * Stores a result in a FPU register and updates the FSW and FTW.
7297 *
7298 * @param pFpuCtx The FPU context.
7299 * @param pResult The result to store.
7300 * @param iStReg Which FPU register to store it in.
7301 */
7302IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7303{
7304 Assert(iStReg < 8);
7305 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7306 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7307 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7308 pFpuCtx->FTW |= RT_BIT(iReg);
7309 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7310}
7311
7312
7313/**
7314 * Only updates the FPU status word (FSW) with the result of the current
7315 * instruction.
7316 *
7317 * @param pFpuCtx The FPU context.
7318 * @param u16FSW The FSW output of the current instruction.
7319 */
7320IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7321{
7322 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7323 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7324}
7325
7326
7327/**
7328 * Pops one item off the FPU stack if no pending exception prevents it.
7329 *
7330 * @param pFpuCtx The FPU context.
7331 */
7332IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7333{
7334 /* Check pending exceptions. */
7335 uint16_t uFSW = pFpuCtx->FSW;
7336 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7337 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7338 return;
7339
7340 /* TOP++; adding 9 is +1 modulo 8. */
7341 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7342 uFSW &= ~X86_FSW_TOP_MASK;
7343 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7344 pFpuCtx->FSW = uFSW;
7345
7346 /* Mark the previous ST0 as empty. */
7347 iOldTop >>= X86_FSW_TOP_SHIFT;
7348 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7349
7350 /* Rotate the registers. */
7351 iemFpuRotateStackPop(pFpuCtx);
7352}
7353
7354
7355/**
7356 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pResult The FPU operation result to push.
7360 */
7361IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7362{
7363 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7364 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7365 iemFpuMaybePushResult(pResult, pFpuCtx);
7366}
7367
7368
7369/**
7370 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7371 * and sets FPUDP and FPUDS.
7372 *
7373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7374 * @param pResult The FPU operation result to push.
7375 * @param iEffSeg The effective segment register.
7376 * @param GCPtrEff The effective address relative to @a iEffSeg.
7377 */
7378IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7379{
7380 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7381 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7382 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7383 iemFpuMaybePushResult(pResult, pFpuCtx);
7384}
7385
7386
7387/**
7388 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7389 * unless a pending exception prevents it.
7390 *
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pResult The FPU operation result to store and push.
7393 */
7394IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7395{
7396 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7397 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7398
7399 /* Update FSW and bail if there are pending exceptions afterwards. */
7400 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7401 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7402 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7403 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7404 {
7405 pFpuCtx->FSW = fFsw;
7406 return;
7407 }
7408
7409 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7410 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7411 {
7412 /* All is fine, push the actual value. */
7413 pFpuCtx->FTW |= RT_BIT(iNewTop);
7414 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7415 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7416 }
7417 else if (pFpuCtx->FCW & X86_FCW_IM)
7418 {
7419 /* Masked stack overflow, push QNaN. */
7420 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7421 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7422 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7423 }
7424 else
7425 {
7426 /* Raise stack overflow, don't push anything. */
7427 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7428 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7429 return;
7430 }
7431
7432 fFsw &= ~X86_FSW_TOP_MASK;
7433 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7434 pFpuCtx->FSW = fFsw;
7435
7436 iemFpuRotateStackPush(pFpuCtx);
7437}
7438
7439
7440/**
7441 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7442 * FOP.
7443 *
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 * @param pResult The result to store.
7446 * @param iStReg Which FPU register to store it in.
7447 */
7448IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7449{
7450 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7451 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7452 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7453}
7454
7455
7456/**
7457 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7458 * FOP, and then pops the stack.
7459 *
7460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7461 * @param pResult The result to store.
7462 * @param iStReg Which FPU register to store it in.
7463 */
7464IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7465{
7466 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7467 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7468 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7469 iemFpuMaybePopOne(pFpuCtx);
7470}
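/* Illustrative sketch (not part of the build): the store-then-pop pattern an
   faddp-style instruction would follow. The IEMFPURESULT is assumed to have
   been filled in by an assembly worker; only the bookkeeping call is shown,
   and the helper name is made up. */
#if 0
static void iemFpuStoreResultThenPopExample(PVMCPU pVCpu, PIEMFPURESULT pResult)
{
    /* Store into ST(1); after the pop it becomes the new ST(0). */
    iemFpuStoreResultThenPop(pVCpu, pResult, 1 /*iStReg*/);
}
#endif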
7471
7472
7473/**
7474 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7475 * FPUDP, and FPUDS.
7476 *
7477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7478 * @param pResult The result to store.
7479 * @param iStReg Which FPU register to store it in.
7480 * @param iEffSeg The effective memory operand selector register.
7481 * @param GCPtrEff The effective memory operand offset.
7482 */
7483IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7484 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7485{
7486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7487 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7488 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7489 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7490}
7491
7492
7493/**
7494 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7495 * FPUDP, and FPUDS, and then pops the stack.
7496 *
7497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7498 * @param pResult The result to store.
7499 * @param iStReg Which FPU register to store it in.
7500 * @param iEffSeg The effective memory operand selector register.
7501 * @param GCPtrEff The effective memory operand offset.
7502 */
7503IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7504 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7505{
7506 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7507 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7508 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7509 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7510 iemFpuMaybePopOne(pFpuCtx);
7511}
7512
7513
7514/**
7515 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7516 *
7517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7518 */
7519IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7520{
7521 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7523}
7524
7525
7526/**
7527 * Marks the specified stack register as free (for FFREE).
7528 *
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 * @param iStReg The register to free.
7531 */
7532IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7533{
7534 Assert(iStReg < 8);
7535 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7536 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7537 pFpuCtx->FTW &= ~RT_BIT(iReg);
7538}
7539
7540
7541/**
7542 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7543 *
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 */
7546IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7547{
7548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7549 uint16_t uFsw = pFpuCtx->FSW;
7550 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7551 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7552 uFsw &= ~X86_FSW_TOP_MASK;
7553 uFsw |= uTop;
7554 pFpuCtx->FSW = uFsw;
7555}
7556
7557
7558/**
7559 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 */
7563IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7564{
7565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7566 uint16_t uFsw = pFpuCtx->FSW;
7567 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7568 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7569 uFsw &= ~X86_FSW_TOP_MASK;
7570 uFsw |= uTop;
7571 pFpuCtx->FSW = uFsw;
7572}
7573
7574
7575/**
7576 * Updates the FSW, FOP, FPUIP, and FPUCS.
7577 *
7578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7579 * @param u16FSW The FSW from the current instruction.
7580 */
7581IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7582{
7583 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7584 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7585 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7586}
7587
7588
7589/**
7590 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7591 *
7592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7593 * @param u16FSW The FSW from the current instruction.
7594 */
7595IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7596{
7597 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7598 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7599 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7600 iemFpuMaybePopOne(pFpuCtx);
7601}
7602
7603
7604/**
7605 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7606 *
7607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7608 * @param u16FSW The FSW from the current instruction.
7609 * @param iEffSeg The effective memory operand selector register.
7610 * @param GCPtrEff The effective memory operand offset.
7611 */
7612IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7613{
7614 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7616 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7617 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7618}
7619
7620
7621/**
7622 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7623 *
7624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7625 * @param u16FSW The FSW from the current instruction.
7626 */
7627IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7628{
7629 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7630 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7631 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7632 iemFpuMaybePopOne(pFpuCtx);
7633 iemFpuMaybePopOne(pFpuCtx);
7634}
7635
7636
7637/**
7638 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7639 *
7640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7641 * @param u16FSW The FSW from the current instruction.
7642 * @param iEffSeg The effective memory operand selector register.
7643 * @param GCPtrEff The effective memory operand offset.
7644 */
7645IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7646{
7647 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7648 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7649 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7650 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7651 iemFpuMaybePopOne(pFpuCtx);
7652}
7653
7654
7655/**
7656 * Worker routine for raising an FPU stack underflow exception.
7657 *
7658 * @param pFpuCtx The FPU context.
7659 * @param iStReg The stack register being accessed.
7660 */
7661IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7662{
7663 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7664 if (pFpuCtx->FCW & X86_FCW_IM)
7665 {
7666 /* Masked underflow. */
7667 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7668 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7669 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7670 if (iStReg != UINT8_MAX)
7671 {
7672 pFpuCtx->FTW |= RT_BIT(iReg);
7673 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7674 }
7675 }
7676 else
7677 {
7678 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7679 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7680 }
7681}
7682
7683
7684/**
7685 * Raises a FPU stack underflow exception.
7686 *
7687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7688 * @param iStReg The destination register that should be loaded
7689 * with QNaN if \#IS is not masked. Specify
7690 * UINT8_MAX if none (like for fcom).
7691 */
7692DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7693{
7694 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7695 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7696 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7697}
7698
7699
7700DECL_NO_INLINE(IEM_STATIC, void)
7701iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7702{
7703 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7704 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7705 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7706 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7707}
7708
7709
7710DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7711{
7712 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7713 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7714 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7715 iemFpuMaybePopOne(pFpuCtx);
7716}
7717
7718
7719DECL_NO_INLINE(IEM_STATIC, void)
7720iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7721{
7722 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7723 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7724 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7725 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7726 iemFpuMaybePopOne(pFpuCtx);
7727}
7728
7729
7730DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7731{
7732 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7733 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7734 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7735 iemFpuMaybePopOne(pFpuCtx);
7736 iemFpuMaybePopOne(pFpuCtx);
7737}
7738
7739
7740DECL_NO_INLINE(IEM_STATIC, void)
7741iemFpuStackPushUnderflow(PVMCPU pVCpu)
7742{
7743 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7744 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7745
7746 if (pFpuCtx->FCW & X86_FCW_IM)
7747 {
7748 /* Masked underflow - Push QNaN. */
7749 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7750 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7751 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7752 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7753 pFpuCtx->FTW |= RT_BIT(iNewTop);
7754 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7755 iemFpuRotateStackPush(pFpuCtx);
7756 }
7757 else
7758 {
7759 /* Exception pending - don't change TOP or the register stack. */
7760 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7761 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7762 }
7763}
7764
7765
7766DECL_NO_INLINE(IEM_STATIC, void)
7767iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7768{
7769 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7770 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7771
7772 if (pFpuCtx->FCW & X86_FCW_IM)
7773 {
7774 /* Masked underflow - Push QNaN. */
7775 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7776 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7777 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7778 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7779 pFpuCtx->FTW |= RT_BIT(iNewTop);
7780 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7781 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7782 iemFpuRotateStackPush(pFpuCtx);
7783 }
7784 else
7785 {
7786 /* Exception pending - don't change TOP or the register stack. */
7787 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7788 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7789 }
7790}
7791
7792
7793/**
7794 * Worker routine for raising an FPU stack overflow exception on a push.
7795 *
7796 * @param pFpuCtx The FPU context.
7797 */
7798IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7799{
7800 if (pFpuCtx->FCW & X86_FCW_IM)
7801 {
7802 /* Masked overflow. */
7803 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7804 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7805 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7806 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7807 pFpuCtx->FTW |= RT_BIT(iNewTop);
7808 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7809 iemFpuRotateStackPush(pFpuCtx);
7810 }
7811 else
7812 {
7813 /* Exception pending - don't change TOP or the register stack. */
7814 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7815 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7816 }
7817}
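/*
 * Illustrative note on the workers above: when X86_FSW_SF is set, C1 is what
 * tells the two stack faults apart (a sketch, not additional code):
 *
 *      fsw |=              X86_FSW_IE | X86_FSW_SF;    // push underflow: C1 left clear
 *      fsw |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;    // push overflow:  C1 set
 */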
7818
7819
7820/**
7821 * Raises an FPU stack overflow exception on a push.
7822 *
7823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7824 */
7825DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7826{
7827 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7828 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7829 iemFpuStackPushOverflowOnly(pFpuCtx);
7830}
7831
7832
7833/**
7834 * Raises an FPU stack overflow exception on a push with a memory operand.
7835 *
7836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7837 * @param iEffSeg The effective memory operand selector register.
7838 * @param GCPtrEff The effective memory operand offset.
7839 */
7840DECL_NO_INLINE(IEM_STATIC, void)
7841iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7842{
7843 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7844 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7845 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7846 iemFpuStackPushOverflowOnly(pFpuCtx);
7847}
7848
7849
7850IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7851{
7852 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7853 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7854 if (pFpuCtx->FTW & RT_BIT(iReg))
7855 return VINF_SUCCESS;
7856 return VERR_NOT_FOUND;
7857}
7858
7859
7860IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7861{
7862 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7863 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7864 if (pFpuCtx->FTW & RT_BIT(iReg))
7865 {
7866 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7867 return VINF_SUCCESS;
7868 }
7869 return VERR_NOT_FOUND;
7870}
7871
7872
7873IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7874 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7875{
7876 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7877 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7878 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7879 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7880 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7881 {
7882 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7883 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7884 return VINF_SUCCESS;
7885 }
7886 return VERR_NOT_FOUND;
7887}
7888
7889
7890IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7891{
7892 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7893 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7894 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7895 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7896 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7897 {
7898 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7899 return VINF_SUCCESS;
7900 }
7901 return VERR_NOT_FOUND;
7902}
7903
7904
7905/**
7906 * Updates the FPU exception status after FCW is changed.
7907 *
7908 * @param pFpuCtx The FPU context.
7909 */
7910IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7911{
7912 uint16_t u16Fsw = pFpuCtx->FSW;
7913 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7914 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7915 else
7916 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7917 pFpuCtx->FSW = u16Fsw;
7918}
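/*
 * Hedged example of the recalculation above (sketch only): a pending IE bit in
 * FSW raises the ES and B summary bits exactly when IM is clear in FCW.
 *
 *      pFpuCtx->FSW |= X86_FSW_IE;                 // pending invalid-operation exception
 *      pFpuCtx->FCW |= X86_FCW_IM;                 // ...which is masked
 *      iemFpuRecalcExceptionStatus(pFpuCtx);       // -> X86_FSW_ES and X86_FSW_B cleared
 *
 *      pFpuCtx->FCW &= ~X86_FCW_IM;                // unmask it
 *      iemFpuRecalcExceptionStatus(pFpuCtx);       // -> X86_FSW_ES and X86_FSW_B set
 */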
7919
7920
7921/**
7922 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7923 *
7924 * @returns The full FTW.
7925 * @param pFpuCtx The FPU context.
7926 */
7927IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7928{
7929 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7930 uint16_t u16Ftw = 0;
7931 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7932 for (unsigned iSt = 0; iSt < 8; iSt++)
7933 {
7934 unsigned const iReg = (iSt + iTop) & 7;
7935 if (!(u8Ftw & RT_BIT(iReg)))
7936 u16Ftw |= 3 << (iReg * 2); /* empty */
7937 else
7938 {
7939 uint16_t uTag;
7940 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7941 if (pr80Reg->s.uExponent == 0x7fff)
7942 uTag = 2; /* Exponent is all 1's => Special. */
7943 else if (pr80Reg->s.uExponent == 0x0000)
7944 {
7945 if (pr80Reg->s.u64Mantissa == 0x0000)
7946 uTag = 1; /* All bits are zero => Zero. */
7947 else
7948 uTag = 2; /* Must be special. */
7949 }
7950 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7951 uTag = 0; /* Valid. */
7952 else
7953 uTag = 2; /* Must be special. */
7954
7955 u16Ftw |= uTag << (iReg * 2);
7956 }
7957 }
7958
7959 return u16Ftw;
7960}
7961
7962
7963/**
7964 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7965 *
7966 * @returns The compressed FTW.
7967 * @param u16FullFtw The full FTW to convert.
7968 */
7969IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7970{
7971 uint8_t u8Ftw = 0;
7972 for (unsigned i = 0; i < 8; i++)
7973 {
7974 if ((u16FullFtw & 3) != 3 /*empty*/)
7975 u8Ftw |= RT_BIT(i);
7976 u16FullFtw >>= 2;
7977 }
7978
7979 return u8Ftw;
7980}
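/*
 * Worked example (illustrative): one valid value in physical register 0 with all
 * other registers empty gives a full FTW of 0xfffc (two bits per register,
 * 11b = empty, 00b = valid), which compresses to a single set bit:
 *
 *      uint16_t const u16Full     = UINT16_C(0xfffc);              // R0 = 00b, R1..R7 = 11b
 *      uint16_t const u16Abridged = iemFpuCompressFtw(u16Full);    // = 0x0001, only bit 0 set
 *
 * iemFpuCalcFullFtw() goes the other way, classifying each occupied register as
 * valid/zero/special from its exponent and mantissa.
 */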
7981
7982/** @} */
7983
7984
7985/** @name Memory access.
7986 *
7987 * @{
7988 */
7989
7990
7991/**
7992 * Updates the IEMCPU::cbWritten counter if applicable.
7993 *
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param fAccess The access being accounted for.
7996 * @param cbMem The access size.
7997 */
7998DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7999{
8000 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8001 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8002 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8003}
8004
8005
8006/**
8007 * Checks if the given segment can be written to, raising the appropriate
8008 * exception if not.
8009 *
8010 * @returns VBox strict status code.
8011 *
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param pHid Pointer to the hidden register.
8014 * @param iSegReg The register number.
8015 * @param pu64BaseAddr Where to return the base address to use for the
8016 * segment. (In 64-bit code it may differ from the
8017 * base in the hidden segment.)
8018 */
8019IEM_STATIC VBOXSTRICTRC
8020iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8021{
8022 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8023
8024 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8025 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8026 else
8027 {
8028 if (!pHid->Attr.n.u1Present)
8029 {
8030 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8031 AssertRelease(uSel == 0);
8032 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8033 return iemRaiseGeneralProtectionFault0(pVCpu);
8034 }
8035
8036 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8037 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8038 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8039 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8040 *pu64BaseAddr = pHid->u64Base;
8041 }
8042 return VINF_SUCCESS;
8043}
8044
8045
8046/**
8047 * Checks if the given segment can be read from, raising the appropriate
8048 * exception if not.
8049 *
8050 * @returns VBox strict status code.
8051 *
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param pHid Pointer to the hidden register.
8054 * @param iSegReg The register number.
8055 * @param pu64BaseAddr Where to return the base address to use for the
8056 * segment. (In 64-bit code it may differ from the
8057 * base in the hidden segment.)
8058 */
8059IEM_STATIC VBOXSTRICTRC
8060iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8061{
8062 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8063
8064 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8065 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8066 else
8067 {
8068 if (!pHid->Attr.n.u1Present)
8069 {
8070 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8071 AssertRelease(uSel == 0);
8072 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8073 return iemRaiseGeneralProtectionFault0(pVCpu);
8074 }
8075
8076 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8077 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8078 *pu64BaseAddr = pHid->u64Base;
8079 }
8080 return VINF_SUCCESS;
8081}
8082
8083
8084/**
8085 * Applies the segment limit, base and attributes.
8086 *
8087 * This may raise a \#GP or \#SS.
8088 *
8089 * @returns VBox strict status code.
8090 *
8091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8092 * @param fAccess The kind of access which is being performed.
8093 * @param iSegReg The index of the segment register to apply.
8094 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8095 * TSS, ++).
8096 * @param cbMem The access size.
8097 * @param pGCPtrMem Pointer to the guest memory address to apply
8098 * segmentation to. Input and output parameter.
8099 */
8100IEM_STATIC VBOXSTRICTRC
8101iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8102{
8103 if (iSegReg == UINT8_MAX)
8104 return VINF_SUCCESS;
8105
8106 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8107 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8108 switch (pVCpu->iem.s.enmCpuMode)
8109 {
8110 case IEMMODE_16BIT:
8111 case IEMMODE_32BIT:
8112 {
8113 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8114 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8115
8116 if ( pSel->Attr.n.u1Present
8117 && !pSel->Attr.n.u1Unusable)
8118 {
8119 Assert(pSel->Attr.n.u1DescType);
8120 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8121 {
8122 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8123 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8124 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8125
8126 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8127 {
8128 /** @todo CPL check. */
8129 }
8130
8131 /*
8132 * There are two kinds of data selectors, normal and expand down.
8133 */
8134 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8135 {
8136 if ( GCPtrFirst32 > pSel->u32Limit
8137 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8138 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8139 }
8140 else
8141 {
8142 /*
8143 * The upper boundary is defined by the B bit, not the G bit!
8144 */
8145 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8146 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8147 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8148 }
8149 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8150 }
8151 else
8152 {
8153
8154 /*
8155 * A code selector can usually be used to read through; writing is
8156 * only permitted in real and V8086 mode.
8157 */
8158 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8159 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8160 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8161 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8162 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8163
8164 if ( GCPtrFirst32 > pSel->u32Limit
8165 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8166 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8167
8168 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8169 {
8170 /** @todo CPL check. */
8171 }
8172
8173 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8174 }
8175 }
8176 else
8177 return iemRaiseGeneralProtectionFault0(pVCpu);
8178 return VINF_SUCCESS;
8179 }
8180
8181 case IEMMODE_64BIT:
8182 {
8183 RTGCPTR GCPtrMem = *pGCPtrMem;
8184 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8185 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8186
8187 Assert(cbMem >= 1);
8188 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8189 return VINF_SUCCESS;
8190 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8191 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8192 return iemRaiseGeneralProtectionFault0(pVCpu);
8193 }
8194
8195 default:
8196 AssertFailedReturn(VERR_IEM_IPE_7);
8197 }
8198}
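/*
 * Worked example for the 32-bit expand-up data path above (illustrative numbers):
 * with u32Limit = 0x0fff and u64Base = 0x10000, a 4 byte access at offset 0x0ffc
 * gives GCPtrLast32 = 0x0fff <= limit and translates to linear 0x10ffc, whereas
 * the same access at offset 0x0ffd gives GCPtrLast32 = 0x1000 > limit and goes to
 * iemRaiseSelectorBounds() instead.
 */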
8199
8200
8201/**
8202 * Translates a virtual address to a physical address and checks if we
8203 * can access the page as specified.
8204 *
8205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8206 * @param GCPtrMem The virtual address.
8207 * @param fAccess The intended access.
8208 * @param pGCPhysMem Where to return the physical address.
8209 */
8210IEM_STATIC VBOXSTRICTRC
8211iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8212{
8213 /** @todo Need a different PGM interface here. We're currently using
8214 * generic / REM interfaces. This won't cut it for R0 & RC. */
8215 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8216 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8217 RTGCPHYS GCPhys;
8218 uint64_t fFlags;
8219 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8220 if (RT_FAILURE(rc))
8221 {
8222 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8223 /** @todo Check unassigned memory in unpaged mode. */
8224 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8225 *pGCPhysMem = NIL_RTGCPHYS;
8226 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8227 }
8228
8229 /* If the page is writable and does not have the no-exec bit set, all
8230 access is allowed. Otherwise we'll have to check more carefully... */
8231 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8232 {
8233 /* Write to read only memory? */
8234 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8235 && !(fFlags & X86_PTE_RW)
8236 && ( (pVCpu->iem.s.uCpl == 3
8237 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8238 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8239 {
8240 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8241 *pGCPhysMem = NIL_RTGCPHYS;
8242 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8243 }
8244
8245 /* Kernel memory accessed by userland? */
8246 if ( !(fFlags & X86_PTE_US)
8247 && pVCpu->iem.s.uCpl == 3
8248 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8249 {
8250 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8251 *pGCPhysMem = NIL_RTGCPHYS;
8252 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8253 }
8254
8255 /* Executing non-executable memory? */
8256 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8257 && (fFlags & X86_PTE_PAE_NX)
8258 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8259 {
8260 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8261 *pGCPhysMem = NIL_RTGCPHYS;
8262 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8263 VERR_ACCESS_DENIED);
8264 }
8265 }
8266
8267 /*
8268 * Set the dirty / access flags.
8269 * ASSUMES this is set when the address is translated rather than on commit...
8270 */
8271 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8272 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8273 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8274 {
8275 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8276 AssertRC(rc2);
8277 }
8278
8279 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8280 *pGCPhysMem = GCPhys;
8281 return VINF_SUCCESS;
8282}
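/*
 * Illustrative summary of the protection checks above (no additional logic):
 *
 *      // CPL 3 write, PTE.RW = 0                      -> #PF, regardless of CR0.WP
 *      // CPL 0 write, PTE.RW = 0, CR0.WP = 1          -> #PF
 *      // CPL 0 write, PTE.RW = 0, CR0.WP = 0          -> allowed
 *      // CPL 3 access, PTE.US = 0                     -> #PF (user touching supervisor page)
 *      // exec fetch,  PTE.NX = 1, EFER.NXE = 1        -> #PF
 *
 * IEM_ACCESS_WHAT_SYS accesses are exempt from the two CPL 3 checks.
 */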
8283
8284
8285
8286/**
8287 * Maps a physical page.
8288 *
8289 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8291 * @param GCPhysMem The physical address.
8292 * @param fAccess The intended access.
8293 * @param ppvMem Where to return the mapping address.
8294 * @param pLock The PGM lock.
8295 */
8296IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8297{
8298#ifdef IEM_LOG_MEMORY_WRITES
8299 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8300 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8301#endif
8302
8303 /** @todo This API may require some improving later. A private deal with PGM
8304 * regarding locking and unlocking needs to be struck. A couple of TLBs
8305 * living in PGM, but with publicly accessible inlined access methods
8306 * could perhaps be an even better solution. */
8307 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8308 GCPhysMem,
8309 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8310 pVCpu->iem.s.fBypassHandlers,
8311 ppvMem,
8312 pLock);
8313 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8314 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8315
8316 return rc;
8317}
8318
8319
8320/**
8321 * Unmap a page previously mapped by iemMemPageMap.
8322 *
8323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8324 * @param GCPhysMem The physical address.
8325 * @param fAccess The intended access.
8326 * @param pvMem What iemMemPageMap returned.
8327 * @param pLock The PGM lock.
8328 */
8329DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8330{
8331 NOREF(pVCpu);
8332 NOREF(GCPhysMem);
8333 NOREF(fAccess);
8334 NOREF(pvMem);
8335 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8336}
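/*
 * Caller-side sketch (hypothetical; real callers go through iemMemMap below, and
 * the variable names and the plain byte read are made up):
 *
 *      PGMPAGEMAPLOCK  Lock;
 *      void           *pvPage;
 *      int rc = iemMemPageMap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint8_t const bByte = *(uint8_t const *)pvPage;     // access within the mapped page
 *          iemMemPageUnmap(pVCpu, GCPhysMem, IEM_ACCESS_DATA_R, pvPage, &Lock);
 *      }
 *      else
 *      {
 *          // a PGM TLB status; iemMemMap falls back on bounce buffering in
 *          // this case, see iemMemBounceBufferMapPhys.
 *      }
 */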
8337
8338
8339/**
8340 * Looks up a memory mapping entry.
8341 *
8342 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8344 * @param pvMem The memory address.
8345 * @param fAccess The access type and scope to match.
8346 */
8347DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8348{
8349 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8350 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8351 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8352 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8353 return 0;
8354 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8355 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8356 return 1;
8357 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8358 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8359 return 2;
8360 return VERR_NOT_FOUND;
8361}
8362
8363
8364/**
8365 * Finds a free memmap entry when using iNextMapping doesn't work.
8366 *
8367 * @returns Memory mapping index, 1024 on failure.
8368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8369 */
8370IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8371{
8372 /*
8373 * The easy case.
8374 */
8375 if (pVCpu->iem.s.cActiveMappings == 0)
8376 {
8377 pVCpu->iem.s.iNextMapping = 1;
8378 return 0;
8379 }
8380
8381 /* There should be enough mappings for all instructions. */
8382 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8383
8384 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8385 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8386 return i;
8387
8388 AssertFailedReturn(1024);
8389}
8390
8391
8392/**
8393 * Commits a bounce buffer that needs writing back and unmaps it.
8394 *
8395 * @returns Strict VBox status code.
8396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8397 * @param iMemMap The index of the buffer to commit.
8398 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8399 * Always false in ring-3, obviously.
8400 */
8401IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8402{
8403 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8404 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8405#ifdef IN_RING3
8406 Assert(!fPostponeFail);
8407 RT_NOREF_PV(fPostponeFail);
8408#endif
8409
8410 /*
8411 * Do the writing.
8412 */
8413 PVM pVM = pVCpu->CTX_SUFF(pVM);
8414 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8415 {
8416 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8417 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8418 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8419 if (!pVCpu->iem.s.fBypassHandlers)
8420 {
8421 /*
8422 * Carefully and efficiently dealing with access handler return
8423 * codes makes this a little bloated.
8424 */
8425 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8427 pbBuf,
8428 cbFirst,
8429 PGMACCESSORIGIN_IEM);
8430 if (rcStrict == VINF_SUCCESS)
8431 {
8432 if (cbSecond)
8433 {
8434 rcStrict = PGMPhysWrite(pVM,
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8436 pbBuf + cbFirst,
8437 cbSecond,
8438 PGMACCESSORIGIN_IEM);
8439 if (rcStrict == VINF_SUCCESS)
8440 { /* nothing */ }
8441 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8442 {
8443 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8446 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8447 }
8448#ifndef IN_RING3
8449 else if (fPostponeFail)
8450 {
8451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8454 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8455 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8456 return iemSetPassUpStatus(pVCpu, rcStrict);
8457 }
8458#endif
8459 else
8460 {
8461 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8464 return rcStrict;
8465 }
8466 }
8467 }
8468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8469 {
8470 if (!cbSecond)
8471 {
8472 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8475 }
8476 else
8477 {
8478 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8480 pbBuf + cbFirst,
8481 cbSecond,
8482 PGMACCESSORIGIN_IEM);
8483 if (rcStrict2 == VINF_SUCCESS)
8484 {
8485 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8488 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8489 }
8490 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8491 {
8492 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8495 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8496 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8497 }
8498#ifndef IN_RING3
8499 else if (fPostponeFail)
8500 {
8501 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8504 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8505 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8506 return iemSetPassUpStatus(pVCpu, rcStrict);
8507 }
8508#endif
8509 else
8510 {
8511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8514 return rcStrict2;
8515 }
8516 }
8517 }
8518#ifndef IN_RING3
8519 else if (fPostponeFail)
8520 {
8521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8524 if (!cbSecond)
8525 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8526 else
8527 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8528 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8529 return iemSetPassUpStatus(pVCpu, rcStrict);
8530 }
8531#endif
8532 else
8533 {
8534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8537 return rcStrict;
8538 }
8539 }
8540 else
8541 {
8542 /*
8543 * No access handlers, much simpler.
8544 */
8545 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8546 if (RT_SUCCESS(rc))
8547 {
8548 if (cbSecond)
8549 {
8550 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8551 if (RT_SUCCESS(rc))
8552 { /* likely */ }
8553 else
8554 {
8555 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8556 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8557 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8558 return rc;
8559 }
8560 }
8561 }
8562 else
8563 {
8564 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8565 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8566 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8567 return rc;
8568 }
8569 }
8570 }
8571
8572#if defined(IEM_LOG_MEMORY_WRITES)
8573 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8574 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8575 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8576 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8577 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8578 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8579
8580 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8581 g_cbIemWrote = cbWrote;
8582 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8583#endif
8584
8585 /*
8586 * Free the mapping entry.
8587 */
8588 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8589 Assert(pVCpu->iem.s.cActiveMappings != 0);
8590 pVCpu->iem.s.cActiveMappings--;
8591 return VINF_SUCCESS;
8592}
8593
8594
8595/**
8596 * iemMemMap worker that deals with a request crossing pages.
8597 */
8598IEM_STATIC VBOXSTRICTRC
8599iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8600{
8601 /*
8602 * Do the address translations.
8603 */
8604 RTGCPHYS GCPhysFirst;
8605 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8606 if (rcStrict != VINF_SUCCESS)
8607 return rcStrict;
8608
8609 RTGCPHYS GCPhysSecond;
8610 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8611 fAccess, &GCPhysSecond);
8612 if (rcStrict != VINF_SUCCESS)
8613 return rcStrict;
8614 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8615
8616 PVM pVM = pVCpu->CTX_SUFF(pVM);
8617
8618 /*
8619 * Read in the current memory content if it's a read, execute or partial
8620 * write access.
8621 */
8622 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8623 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8624 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8625
8626 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8627 {
8628 if (!pVCpu->iem.s.fBypassHandlers)
8629 {
8630 /*
8631 * Must carefully deal with access handler status codes here,
8632 * which makes the code a bit bloated.
8633 */
8634 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8635 if (rcStrict == VINF_SUCCESS)
8636 {
8637 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8638 if (rcStrict == VINF_SUCCESS)
8639 { /*likely */ }
8640 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8641 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8642 else
8643 {
8644 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8645 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8646 return rcStrict;
8647 }
8648 }
8649 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8650 {
8651 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8652 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8653 {
8654 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8655 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8656 }
8657 else
8658 {
8659 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8660 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8661 return rcStrict2;
8662 }
8663 }
8664 else
8665 {
8666 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8667 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8668 return rcStrict;
8669 }
8670 }
8671 else
8672 {
8673 /*
8674 * No informational status codes here, much more straightforward.
8675 */
8676 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8677 if (RT_SUCCESS(rc))
8678 {
8679 Assert(rc == VINF_SUCCESS);
8680 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8681 if (RT_SUCCESS(rc))
8682 Assert(rc == VINF_SUCCESS);
8683 else
8684 {
8685 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8686 return rc;
8687 }
8688 }
8689 else
8690 {
8691 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8692 return rc;
8693 }
8694 }
8695 }
8696#ifdef VBOX_STRICT
8697 else
8698 memset(pbBuf, 0xcc, cbMem);
8699 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8700 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8701#endif
8702
8703 /*
8704 * Commit the bounce buffer entry.
8705 */
8706 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8707 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8708 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8709 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8710 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8711 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8712 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8713 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8714 pVCpu->iem.s.cActiveMappings++;
8715
8716 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8717 *ppvMem = pbBuf;
8718 return VINF_SUCCESS;
8719}
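/*
 * Worked example of the split above (illustrative): an access of cbMem = 4 whose
 * first byte lands at page offset 0xffe gives
 *
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffe = 2
 *      cbSecondPage = cbMem - cbFirstPage                          = 4 - 2          = 2
 *
 * so two bytes map to the end of the first page and two to the start of the
 * second, stitched together in the bounce buffer.
 */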
8720
8721
8722/**
8723 * iemMemMap worker that deals with iemMemPageMap failures.
8724 */
8725IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8726 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8727{
8728 /*
8729 * Filter out the conditions we cannot handle here and the ones which shouldn't happen.
8730 */
8731 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8732 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8733 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8734 {
8735 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8736 return rcMap;
8737 }
8738 pVCpu->iem.s.cPotentialExits++;
8739
8740 /*
8741 * Read in the current memory content if it's a read, execute or partial
8742 * write access.
8743 */
8744 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8745 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8746 {
8747 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8748 memset(pbBuf, 0xff, cbMem);
8749 else
8750 {
8751 int rc;
8752 if (!pVCpu->iem.s.fBypassHandlers)
8753 {
8754 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8755 if (rcStrict == VINF_SUCCESS)
8756 { /* nothing */ }
8757 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8758 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8759 else
8760 {
8761 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8762 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8763 return rcStrict;
8764 }
8765 }
8766 else
8767 {
8768 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8769 if (RT_SUCCESS(rc))
8770 { /* likely */ }
8771 else
8772 {
8773 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8774 GCPhysFirst, rc));
8775 return rc;
8776 }
8777 }
8778 }
8779 }
8780#ifdef VBOX_STRICT
8781 else
8782 memset(pbBuf, 0xcc, cbMem);
8783#endif
8784#ifdef VBOX_STRICT
8785 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8786 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8787#endif
8788
8789 /*
8790 * Commit the bounce buffer entry.
8791 */
8792 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8793 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8794 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8795 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8796 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8797 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8798 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8799 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8800 pVCpu->iem.s.cActiveMappings++;
8801
8802 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8803 *ppvMem = pbBuf;
8804 return VINF_SUCCESS;
8805}
8806
8807
8808
8809/**
8810 * Maps the specified guest memory for the given kind of access.
8811 *
8812 * This may use bounce buffering of the memory if it crosses a page
8813 * boundary or if there is an access handler installed for any of it. Because
8814 * of lock prefix guarantees, we're in for some extra clutter when this
8815 * happens.
8816 *
8817 * This may raise a \#GP, \#SS, \#PF or \#AC.
8818 *
8819 * @returns VBox strict status code.
8820 *
8821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8822 * @param ppvMem Where to return the pointer to the mapped
8823 * memory.
8824 * @param cbMem The number of bytes to map. This is usually 1,
8825 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8826 * string operations it can be up to a page.
8827 * @param iSegReg The index of the segment register to use for
8828 * this access. The base and limits are checked.
8829 * Use UINT8_MAX to indicate that no segmentation
8830 * is required (for IDT, GDT and LDT accesses).
8831 * @param GCPtrMem The address of the guest memory.
8832 * @param fAccess How the memory is being accessed. The
8833 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8834 * how to map the memory, while the
8835 * IEM_ACCESS_WHAT_XXX bit is used when raising
8836 * exceptions.
8837 */
8838IEM_STATIC VBOXSTRICTRC
8839iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8840{
8841 /*
8842 * Check the input and figure out which mapping entry to use.
8843 */
8844 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8845 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8846 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8847
8848 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8849 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8850 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8851 {
8852 iMemMap = iemMemMapFindFree(pVCpu);
8853 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8854 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8855 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8856 pVCpu->iem.s.aMemMappings[2].fAccess),
8857 VERR_IEM_IPE_9);
8858 }
8859
8860 /*
8861 * Map the memory, checking that we can actually access it. If something
8862 * slightly complicated happens, fall back on bounce buffering.
8863 */
8864 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8865 if (rcStrict != VINF_SUCCESS)
8866 return rcStrict;
8867
8868 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8869 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8870
8871 RTGCPHYS GCPhysFirst;
8872 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8873 if (rcStrict != VINF_SUCCESS)
8874 return rcStrict;
8875
8876 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8877 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8878 if (fAccess & IEM_ACCESS_TYPE_READ)
8879 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8880
8881 void *pvMem;
8882 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8883 if (rcStrict != VINF_SUCCESS)
8884 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8885
8886 /*
8887 * Fill in the mapping table entry.
8888 */
8889 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8890 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8891 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8892 pVCpu->iem.s.cActiveMappings++;
8893
8894 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8895 *ppvMem = pvMem;
8896
8897 return VINF_SUCCESS;
8898}
8899
8900
8901/**
8902 * Commits the guest memory if bounce buffered and unmaps it.
8903 *
8904 * @returns Strict VBox status code.
8905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8906 * @param pvMem The mapping.
8907 * @param fAccess The kind of access.
8908 */
8909IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8910{
8911 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8912 AssertReturn(iMemMap >= 0, iMemMap);
8913
8914 /* If it's bounce buffered, we may need to write back the buffer. */
8915 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8916 {
8917 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8918 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8919 }
8920 /* Otherwise unlock it. */
8921 else
8922 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8923
8924 /* Free the entry. */
8925 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8926 Assert(pVCpu->iem.s.cActiveMappings != 0);
8927 pVCpu->iem.s.cActiveMappings--;
8928 return VINF_SUCCESS;
8929}
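/*
 * Typical caller pattern, shown as a hedged sketch (u32Value, GCPtrMem and the
 * DS segment choice are hypothetical; the data fetch helpers further down use
 * the same shape for reads):
 *
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;                                    // store through the mapping
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */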
8930
8931#ifdef IEM_WITH_SETJMP
8932
8933/**
8934 * Maps the specified guest memory for the given kind of access, longjmp on
8935 * error.
8936 *
8937 * This may use bounce buffering of the memory if it crosses a page
8938 * boundary or if there is an access handler installed for any of it. Because
8939 * of lock prefix guarantees, we're in for some extra clutter when this
8940 * happens.
8941 *
8942 * This may raise a \#GP, \#SS, \#PF or \#AC.
8943 *
8944 * @returns Pointer to the mapped memory.
8945 *
8946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8947 * @param cbMem The number of bytes to map. This is usually 1,
8948 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8949 * string operations it can be up to a page.
8950 * @param iSegReg The index of the segment register to use for
8951 * this access. The base and limits are checked.
8952 * Use UINT8_MAX to indicate that no segmentation
8953 * is required (for IDT, GDT and LDT accesses).
8954 * @param GCPtrMem The address of the guest memory.
8955 * @param fAccess How the memory is being accessed. The
8956 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8957 * how to map the memory, while the
8958 * IEM_ACCESS_WHAT_XXX bit is used when raising
8959 * exceptions.
8960 */
8961IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8962{
8963 /*
8964 * Check the input and figure out which mapping entry to use.
8965 */
8966 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8967 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8968 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8969
8970 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8971 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8972 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8973 {
8974 iMemMap = iemMemMapFindFree(pVCpu);
8975 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8976 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8977 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8978 pVCpu->iem.s.aMemMappings[2].fAccess),
8979 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8980 }
8981
8982 /*
8983 * Map the memory, checking that we can actually access it. If something
8984 * slightly complicated happens, fall back on bounce buffering.
8985 */
8986 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8987 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8988 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8989
8990 /* Crossing a page boundary? */
8991 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8992 { /* No (likely). */ }
8993 else
8994 {
8995 void *pvMem;
8996 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8997 if (rcStrict == VINF_SUCCESS)
8998 return pvMem;
8999 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000 }
9001
9002 RTGCPHYS GCPhysFirst;
9003 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9004 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9005 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9006
9007 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9008 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9009 if (fAccess & IEM_ACCESS_TYPE_READ)
9010 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9011
9012 void *pvMem;
9013 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9014 if (rcStrict == VINF_SUCCESS)
9015 { /* likely */ }
9016 else
9017 {
9018 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9019 if (rcStrict == VINF_SUCCESS)
9020 return pvMem;
9021 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9022 }
9023
9024 /*
9025 * Fill in the mapping table entry.
9026 */
9027 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9028 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9029 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9030 pVCpu->iem.s.cActiveMappings++;
9031
9032 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9033 return pvMem;
9034}
9035
9036
9037/**
9038 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9039 *
9040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9041 * @param pvMem The mapping.
9042 * @param fAccess The kind of access.
9043 */
9044IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9045{
9046 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9047 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9048
9049 /* If it's bounce buffered, we may need to write back the buffer. */
9050 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9051 {
9052 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9053 {
9054 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9055 if (rcStrict == VINF_SUCCESS)
9056 return;
9057 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9058 }
9059 }
9060 /* Otherwise unlock it. */
9061 else
9062 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9063
9064 /* Free the entry. */
9065 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9066 Assert(pVCpu->iem.s.cActiveMappings != 0);
9067 pVCpu->iem.s.cActiveMappings--;
9068}
9069
9070#endif /* IEM_WITH_SETJMP */
9071
9072#ifndef IN_RING3
9073/**
9074 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9075 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9076 *
9077 * Allows the instruction to be completed and retired, while the IEM user will
9078 * return to ring-3 immediately afterwards and do the postponed writes there.
9079 *
9080 * @returns VBox status code (no strict statuses). Caller must check
9081 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9083 * @param pvMem The mapping.
9084 * @param fAccess The kind of access.
9085 */
9086IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9087{
9088 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9089 AssertReturn(iMemMap >= 0, iMemMap);
9090
9091 /* If it's bounce buffered, we may need to write back the buffer. */
9092 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9093 {
9094 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9095 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9096 }
9097 /* Otherwise unlock it. */
9098 else
9099 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9100
9101 /* Free the entry. */
9102 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9103 Assert(pVCpu->iem.s.cActiveMappings != 0);
9104 pVCpu->iem.s.cActiveMappings--;
9105 return VINF_SUCCESS;
9106}
9107#endif
9108
9109
9110/**
9111 * Rolls back mappings, releasing page locks and such.
9112 *
9113 * The caller shall only call this after checking cActiveMappings.
9114 *
9116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9117 */
9118IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9119{
9120 Assert(pVCpu->iem.s.cActiveMappings > 0);
9121
9122 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9123 while (iMemMap-- > 0)
9124 {
9125 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9126 if (fAccess != IEM_ACCESS_INVALID)
9127 {
9128 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9129 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9130 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9131 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9132 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9133 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9134 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9136 pVCpu->iem.s.cActiveMappings--;
9137 }
9138 }
9139}
9140
9141
9142/**
9143 * Fetches a data byte.
9144 *
9145 * @returns Strict VBox status code.
9146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9147 * @param pu8Dst Where to return the byte.
9148 * @param iSegReg The index of the segment register to use for
9149 * this access. The base and limits are checked.
9150 * @param GCPtrMem The address of the guest memory.
9151 */
9152IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9153{
9154 /* The lazy approach for now... */
9155 uint8_t const *pu8Src;
9156 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9157 if (rc == VINF_SUCCESS)
9158 {
9159 *pu8Dst = *pu8Src;
9160 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9161 }
9162 return rc;
9163}
9164
9165
9166#ifdef IEM_WITH_SETJMP
9167/**
9168 * Fetches a data byte, longjmp on error.
9169 *
9170 * @returns The byte.
9171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9172 * @param iSegReg The index of the segment register to use for
9173 * this access. The base and limits are checked.
9174 * @param GCPtrMem The address of the guest memory.
9175 */
9176DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9177{
9178 /* The lazy approach for now... */
9179 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9180 uint8_t const bRet = *pu8Src;
9181 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9182 return bRet;
9183}
9184#endif /* IEM_WITH_SETJMP */
9185
9186
9187/**
9188 * Fetches a data word.
9189 *
9190 * @returns Strict VBox status code.
9191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9192 * @param pu16Dst Where to return the word.
9193 * @param iSegReg The index of the segment register to use for
9194 * this access. The base and limits are checked.
9195 * @param GCPtrMem The address of the guest memory.
9196 */
9197IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9198{
9199 /* The lazy approach for now... */
9200 uint16_t const *pu16Src;
9201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9202 if (rc == VINF_SUCCESS)
9203 {
9204 *pu16Dst = *pu16Src;
9205 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9206 }
9207 return rc;
9208}
9209
9210
9211#ifdef IEM_WITH_SETJMP
9212/**
9213 * Fetches a data word, longjmp on error.
9214 *
9215 * @returns The word
9216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9217 * @param iSegReg The index of the segment register to use for
9218 * this access. The base and limits are checked.
9219 * @param GCPtrMem The address of the guest memory.
9220 */
9221DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9222{
9223 /* The lazy approach for now... */
9224 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9225 uint16_t const u16Ret = *pu16Src;
9226 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9227 return u16Ret;
9228}
9229#endif
9230
9231
9232/**
9233 * Fetches a data dword.
9234 *
9235 * @returns Strict VBox status code.
9236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9237 * @param pu32Dst Where to return the dword.
9238 * @param iSegReg The index of the segment register to use for
9239 * this access. The base and limits are checked.
9240 * @param GCPtrMem The address of the guest memory.
9241 */
9242IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9243{
9244 /* The lazy approach for now... */
9245 uint32_t const *pu32Src;
9246 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9247 if (rc == VINF_SUCCESS)
9248 {
9249 *pu32Dst = *pu32Src;
9250 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9251 }
9252 return rc;
9253}
9254
9255
9256#ifdef IEM_WITH_SETJMP
9257
9258IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9259{
9260 Assert(cbMem >= 1);
9261 Assert(iSegReg < X86_SREG_COUNT);
9262
9263 /*
9264 * 64-bit mode is simpler.
9265 */
9266 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9267 {
9268 if (iSegReg >= X86_SREG_FS)
9269 {
9270 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9271 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9272 GCPtrMem += pSel->u64Base;
9273 }
9274
9275 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9276 return GCPtrMem;
9277 }
9278 /*
9279 * 16-bit and 32-bit segmentation.
9280 */
9281 else
9282 {
9283 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9284 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9285 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9286 == X86DESCATTR_P /* data, expand up */
9287 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9288 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9289 {
9290 /* expand up */
9291 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9292 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9293 && GCPtrLast32 > (uint32_t)GCPtrMem))
9294 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9295 }
9296 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9297 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9298 {
9299 /* expand down */
9300 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9301 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9302 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9303 && GCPtrLast32 > (uint32_t)GCPtrMem))
9304 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9305 }
9306 else
9307 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9308 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9309 }
9310 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9311}
9312
9313
9314IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9315{
9316 Assert(cbMem >= 1);
9317 Assert(iSegReg < X86_SREG_COUNT);
9318
9319 /*
9320 * 64-bit mode is simpler.
9321 */
9322 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9323 {
9324 if (iSegReg >= X86_SREG_FS)
9325 {
9326 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9327 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9328 GCPtrMem += pSel->u64Base;
9329 }
9330
9331 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9332 return GCPtrMem;
9333 }
9334 /*
9335 * 16-bit and 32-bit segmentation.
9336 */
9337 else
9338 {
9339 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9340 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9341 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9342 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9343 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9344 {
9345 /* expand up */
9346 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9347 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9348 && GCPtrLast32 > (uint32_t)GCPtrMem))
9349 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9350 }
9351 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9352 {
9353 /* expand down */
9354 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9355 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9356 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9357 && GCPtrLast32 > (uint32_t)GCPtrMem))
9358 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9359 }
9360 else
9361 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9362 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9363 }
9364 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9365}
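
/*
 * A short architectural reminder for the legacy-mode checks above: an
 * expand-down data segment inverts the usual limit check, so the valid offsets
 * run from u32Limit + 1 up to 0xffff (or 0xffffffff when the D/B bit,
 * Attr.n.u1DefBig, is set), which is essentially what the expand-down branches
 * test.  The extra GCPtrLast32 > (uint32_t)GCPtrMem clause rejects accesses
 * that wrap around the 32-bit offset space.
 */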
9366
9367
9368/**
9369 * Fetches a data dword, longjmp on error, fallback/safe version.
9370 *
9371 * @returns The dword.
9372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9373 * @param iSegReg The index of the segment register to use for
9374 * this access. The base and limits are checked.
9375 * @param GCPtrMem The address of the guest memory.
9376 */
9377IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9378{
9379 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9380 uint32_t const u32Ret = *pu32Src;
9381 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9382 return u32Ret;
9383}
9384
9385
9386/**
9387 * Fetches a data dword, longjmp on error.
9388 *
9389 * @returns The dword.
9390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9391 * @param iSegReg The index of the segment register to use for
9392 * this access. The base and limits are checked.
9393 * @param GCPtrMem The address of the guest memory.
9394 */
9395DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9396{
9397# ifdef IEM_WITH_DATA_TLB
9398 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9399 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9400 {
9401 /// @todo more later.
9402 }
9403
9404 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9405# else
9406 /* The lazy approach. */
9407 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9408 uint32_t const u32Ret = *pu32Src;
9409 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9410 return u32Ret;
9411# endif
9412}
9413#endif
9414
9415
9416#ifdef SOME_UNUSED_FUNCTION
9417/**
9418 * Fetches a data dword and sign extends it to a qword.
9419 *
9420 * @returns Strict VBox status code.
9421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9422 * @param pu64Dst Where to return the sign extended value.
9423 * @param iSegReg The index of the segment register to use for
9424 * this access. The base and limits are checked.
9425 * @param GCPtrMem The address of the guest memory.
9426 */
9427IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9428{
9429 /* The lazy approach for now... */
9430 int32_t const *pi32Src;
9431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9432 if (rc == VINF_SUCCESS)
9433 {
9434 *pu64Dst = *pi32Src;
9435 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9436 }
9437#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9438 else
9439 *pu64Dst = 0;
9440#endif
9441 return rc;
9442}
9443#endif
9444
9445
9446/**
9447 * Fetches a data qword.
9448 *
9449 * @returns Strict VBox status code.
9450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9451 * @param pu64Dst Where to return the qword.
9452 * @param iSegReg The index of the segment register to use for
9453 * this access. The base and limits are checked.
9454 * @param GCPtrMem The address of the guest memory.
9455 */
9456IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9457{
9458 /* The lazy approach for now... */
9459 uint64_t const *pu64Src;
9460 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9461 if (rc == VINF_SUCCESS)
9462 {
9463 *pu64Dst = *pu64Src;
9464 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9465 }
9466 return rc;
9467}
9468
9469
9470#ifdef IEM_WITH_SETJMP
9471/**
9472 * Fetches a data qword, longjmp on error.
9473 *
9474 * @returns The qword.
9475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9476 * @param iSegReg The index of the segment register to use for
9477 * this access. The base and limits are checked.
9478 * @param GCPtrMem The address of the guest memory.
9479 */
9480DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9481{
9482 /* The lazy approach for now... */
9483 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9484 uint64_t const u64Ret = *pu64Src;
9485 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9486 return u64Ret;
9487}
9488#endif
9489
9490
9491/**
9492 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9493 *
9494 * @returns Strict VBox status code.
9495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9496 * @param pu64Dst Where to return the qword.
9497 * @param iSegReg The index of the segment register to use for
9498 * this access. The base and limits are checked.
9499 * @param GCPtrMem The address of the guest memory.
9500 */
9501IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9502{
9503 /* The lazy approach for now... */
9504 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9505 if (RT_UNLIKELY(GCPtrMem & 15))
9506 return iemRaiseGeneralProtectionFault0(pVCpu);
9507
9508 uint64_t const *pu64Src;
9509 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9510 if (rc == VINF_SUCCESS)
9511 {
9512 *pu64Dst = *pu64Src;
9513 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9514 }
9515 return rc;
9516}
9517
9518
9519#ifdef IEM_WITH_SETJMP
9520/**
9521 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9522 *
9523 * @returns The qword.
9524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9525 * @param iSegReg The index of the segment register to use for
9526 * this access. The base and limits are checked.
9527 * @param GCPtrMem The address of the guest memory.
9528 */
9529DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9530{
9531 /* The lazy approach for now... */
9532 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9533 if (RT_LIKELY(!(GCPtrMem & 15)))
9534 {
9535 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9536 uint64_t const u64Ret = *pu64Src;
9537 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9538 return u64Ret;
9539 }
9540
9541 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9542 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9543}
9544#endif
9545
9546
9547/**
9548 * Fetches a data tword.
9549 *
9550 * @returns Strict VBox status code.
9551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9552 * @param pr80Dst Where to return the tword.
9553 * @param iSegReg The index of the segment register to use for
9554 * this access. The base and limits are checked.
9555 * @param GCPtrMem The address of the guest memory.
9556 */
9557IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9558{
9559 /* The lazy approach for now... */
9560 PCRTFLOAT80U pr80Src;
9561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9562 if (rc == VINF_SUCCESS)
9563 {
9564 *pr80Dst = *pr80Src;
9565 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9566 }
9567 return rc;
9568}
9569
9570
9571#ifdef IEM_WITH_SETJMP
9572/**
9573 * Fetches a data tword, longjmp on error.
9574 *
9575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9576 * @param pr80Dst Where to return the tword.
9577 * @param iSegReg The index of the segment register to use for
9578 * this access. The base and limits are checked.
9579 * @param GCPtrMem The address of the guest memory.
9580 */
9581DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9582{
9583 /* The lazy approach for now... */
9584 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9585 *pr80Dst = *pr80Src;
9586 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9587}
9588#endif
9589
9590
9591/**
9592 * Fetches a data dqword (double qword), generally SSE related.
9593 *
9594 * @returns Strict VBox status code.
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param pu128Dst Where to return the dqword.
9597 * @param iSegReg The index of the segment register to use for
9598 * this access. The base and limits are checked.
9599 * @param GCPtrMem The address of the guest memory.
9600 */
9601IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9602{
9603 /* The lazy approach for now... */
9604 PCRTUINT128U pu128Src;
9605 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9606 if (rc == VINF_SUCCESS)
9607 {
9608 pu128Dst->au64[0] = pu128Src->au64[0];
9609 pu128Dst->au64[1] = pu128Src->au64[1];
9610 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9611 }
9612 return rc;
9613}
9614
9615
9616#ifdef IEM_WITH_SETJMP
9617/**
9618 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9619 *
9620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9621 * @param pu128Dst Where to return the dqword.
9622 * @param iSegReg The index of the segment register to use for
9623 * this access. The base and limits are checked.
9624 * @param GCPtrMem The address of the guest memory.
9625 */
9626IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9627{
9628 /* The lazy approach for now... */
9629 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9630 pu128Dst->au64[0] = pu128Src->au64[0];
9631 pu128Dst->au64[1] = pu128Src->au64[1];
9632 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9633}
9634#endif
9635
9636
9637/**
9638 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9639 * related.
9640 *
9641 * Raises \#GP(0) if not aligned.
9642 *
9643 * @returns Strict VBox status code.
9644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9645 * @param pu128Dst Where to return the dqword.
9646 * @param iSegReg The index of the segment register to use for
9647 * this access. The base and limits are checked.
9648 * @param GCPtrMem The address of the guest memory.
9649 */
9650IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9651{
9652 /* The lazy approach for now... */
9653 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9654 if ( (GCPtrMem & 15)
9655 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9656 return iemRaiseGeneralProtectionFault0(pVCpu);
9657
9658 PCRTUINT128U pu128Src;
9659 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9660 if (rc == VINF_SUCCESS)
9661 {
9662 pu128Dst->au64[0] = pu128Src->au64[0];
9663 pu128Dst->au64[1] = pu128Src->au64[1];
9664 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9665 }
9666 return rc;
9667}
9668
9669
9670#ifdef IEM_WITH_SETJMP
9671/**
9672 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9673 * related, longjmp on error.
9674 *
9675 * Raises \#GP(0) if not aligned.
9676 *
9677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9678 * @param pu128Dst Where to return the dqword.
9679 * @param iSegReg The index of the segment register to use for
9680 * this access. The base and limits are checked.
9681 * @param GCPtrMem The address of the guest memory.
9682 */
9683DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9684{
9685 /* The lazy approach for now... */
9686 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9687 if ( (GCPtrMem & 15) == 0
9688 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9689 {
9690 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9691 pu128Dst->au64[0] = pu128Src->au64[0];
9692 pu128Dst->au64[1] = pu128Src->au64[1];
9693 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9694 return;
9695 }
9696
9697 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9698 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9699}
9700#endif
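
/*
 * Note on the X86_MXCSR_MM tests in the aligned SSE fetch (and store) helpers:
 * MXCSR.MM is AMD's misaligned SSE mode bit.  When the guest has it set, SSE
 * memory operands are exempt from the usual 16-byte alignment requirement, so
 * no \#GP(0) is raised and the alignment check is skipped here as well.
 */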
9701
9702
9703/**
9704 * Fetches a data oword (octo word), generally AVX related.
9705 *
9706 * @returns Strict VBox status code.
9707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9708 * @param pu256Dst Where to return the oword.
9709 * @param iSegReg The index of the segment register to use for
9710 * this access. The base and limits are checked.
9711 * @param GCPtrMem The address of the guest memory.
9712 */
9713IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9714{
9715 /* The lazy approach for now... */
9716 PCRTUINT256U pu256Src;
9717 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9718 if (rc == VINF_SUCCESS)
9719 {
9720 pu256Dst->au64[0] = pu256Src->au64[0];
9721 pu256Dst->au64[1] = pu256Src->au64[1];
9722 pu256Dst->au64[2] = pu256Src->au64[2];
9723 pu256Dst->au64[3] = pu256Src->au64[3];
9724 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9725 }
9726 return rc;
9727}
9728
9729
9730#ifdef IEM_WITH_SETJMP
9731/**
9732 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9733 *
9734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9735 * @param pu256Dst Where to return the oword.
9736 * @param iSegReg The index of the segment register to use for
9737 * this access. The base and limits are checked.
9738 * @param GCPtrMem The address of the guest memory.
9739 */
9740IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9741{
9742 /* The lazy approach for now... */
9743 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9744 pu256Dst->au64[0] = pu256Src->au64[0];
9745 pu256Dst->au64[1] = pu256Src->au64[1];
9746 pu256Dst->au64[2] = pu256Src->au64[2];
9747 pu256Dst->au64[3] = pu256Src->au64[3];
9748 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9749}
9750#endif
9751
9752
9753/**
9754 * Fetches a data oword (octo word) at an aligned address, generally AVX
9755 * related.
9756 *
9757 * Raises \#GP(0) if not aligned.
9758 *
9759 * @returns Strict VBox status code.
9760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9761 * @param pu256Dst Where to return the oword.
9762 * @param iSegReg The index of the segment register to use for
9763 * this access. The base and limits are checked.
9764 * @param GCPtrMem The address of the guest memory.
9765 */
9766IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9767{
9768 /* The lazy approach for now... */
9769 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9770 if (GCPtrMem & 31)
9771 return iemRaiseGeneralProtectionFault0(pVCpu);
9772
9773 PCRTUINT256U pu256Src;
9774 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9775 if (rc == VINF_SUCCESS)
9776 {
9777 pu256Dst->au64[0] = pu256Src->au64[0];
9778 pu256Dst->au64[1] = pu256Src->au64[1];
9779 pu256Dst->au64[2] = pu256Src->au64[2];
9780 pu256Dst->au64[3] = pu256Src->au64[3];
9781 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9782 }
9783 return rc;
9784}
9785
9786
9787#ifdef IEM_WITH_SETJMP
9788/**
9789 * Fetches a data oword (octo word) at an aligned address, generally AVX
9790 * related, longjmp on error.
9791 *
9792 * Raises \#GP(0) if not aligned.
9793 *
9794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9795 * @param pu256Dst Where to return the oword.
9796 * @param iSegReg The index of the segment register to use for
9797 * this access. The base and limits are checked.
9798 * @param GCPtrMem The address of the guest memory.
9799 */
9800DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9801{
9802 /* The lazy approach for now... */
9803 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9804 if ((GCPtrMem & 31) == 0)
9805 {
9806 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9807 pu256Dst->au64[0] = pu256Src->au64[0];
9808 pu256Dst->au64[1] = pu256Src->au64[1];
9809 pu256Dst->au64[2] = pu256Src->au64[2];
9810 pu256Dst->au64[3] = pu256Src->au64[3];
9811 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9812 return;
9813 }
9814
9815 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9816 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9817}
9818#endif
9819
9820
9821
9822/**
9823 * Fetches a descriptor register (lgdt, lidt).
9824 *
9825 * @returns Strict VBox status code.
9826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9827 * @param pcbLimit Where to return the limit.
9828 * @param pGCPtrBase Where to return the base.
9829 * @param iSegReg The index of the segment register to use for
9830 * this access. The base and limits are checked.
9831 * @param GCPtrMem The address of the guest memory.
9832 * @param enmOpSize The effective operand size.
9833 */
9834IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9835 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9836{
9837 /*
9838 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9839 * little special:
9840 * - The two reads are done separately.
9841 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9842 * - We suspect the 386 to actually commit the limit before the base in
9843 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9844 * don't try to emulate this eccentric behavior, because it's not well
9845 * enough understood and rather hard to trigger.
9846 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9847 */
9848 VBOXSTRICTRC rcStrict;
9849 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9850 {
9851 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9852 if (rcStrict == VINF_SUCCESS)
9853 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9854 }
9855 else
9856 {
9857 uint32_t uTmp = 0; /* (Silences Visual C++'s 'maybe used uninitialized' warning.) */
9858 if (enmOpSize == IEMMODE_32BIT)
9859 {
9860 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9861 {
9862 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9863 if (rcStrict == VINF_SUCCESS)
9864 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9865 }
9866 else
9867 {
9868 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9869 if (rcStrict == VINF_SUCCESS)
9870 {
9871 *pcbLimit = (uint16_t)uTmp;
9872 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9873 }
9874 }
9875 if (rcStrict == VINF_SUCCESS)
9876 *pGCPtrBase = uTmp;
9877 }
9878 else
9879 {
9880 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9881 if (rcStrict == VINF_SUCCESS)
9882 {
9883 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9884 if (rcStrict == VINF_SUCCESS)
9885 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9886 }
9887 }
9888 }
9889 return rcStrict;
9890}
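
/*
 * For reference, the LGDT/LIDT source operand fetched above is laid out as a
 * 16-bit limit followed by the base address:
 *
 *      offset 0:   limit (word)
 *      offset 2:   base  (dword in 16-bit and 32-bit code, qword in 64-bit
 *                  code; only the low 24 bits are used when the operand size
 *                  is 16-bit, as done above)
 */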
9891
9892
9893
9894/**
9895 * Stores a data byte.
9896 *
9897 * @returns Strict VBox status code.
9898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9899 * @param iSegReg The index of the segment register to use for
9900 * this access. The base and limits are checked.
9901 * @param GCPtrMem The address of the guest memory.
9902 * @param u8Value The value to store.
9903 */
9904IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9905{
9906 /* The lazy approach for now... */
9907 uint8_t *pu8Dst;
9908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9909 if (rc == VINF_SUCCESS)
9910 {
9911 *pu8Dst = u8Value;
9912 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9913 }
9914 return rc;
9915}
9916
9917
9918#ifdef IEM_WITH_SETJMP
9919/**
9920 * Stores a data byte, longjmp on error.
9921 *
9922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9923 * @param iSegReg The index of the segment register to use for
9924 * this access. The base and limits are checked.
9925 * @param GCPtrMem The address of the guest memory.
9926 * @param u8Value The value to store.
9927 */
9928IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9929{
9930 /* The lazy approach for now... */
9931 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9932 *pu8Dst = u8Value;
9933 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9934}
9935#endif
9936
9937
9938/**
9939 * Stores a data word.
9940 *
9941 * @returns Strict VBox status code.
9942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9943 * @param iSegReg The index of the segment register to use for
9944 * this access. The base and limits are checked.
9945 * @param GCPtrMem The address of the guest memory.
9946 * @param u16Value The value to store.
9947 */
9948IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9949{
9950 /* The lazy approach for now... */
9951 uint16_t *pu16Dst;
9952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9953 if (rc == VINF_SUCCESS)
9954 {
9955 *pu16Dst = u16Value;
9956 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9957 }
9958 return rc;
9959}
9960
9961
9962#ifdef IEM_WITH_SETJMP
9963/**
9964 * Stores a data word, longjmp on error.
9965 *
9966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9967 * @param iSegReg The index of the segment register to use for
9968 * this access. The base and limits are checked.
9969 * @param GCPtrMem The address of the guest memory.
9970 * @param u16Value The value to store.
9971 */
9972IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9973{
9974 /* The lazy approach for now... */
9975 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9976 *pu16Dst = u16Value;
9977 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9978}
9979#endif
9980
9981
9982/**
9983 * Stores a data dword.
9984 *
9985 * @returns Strict VBox status code.
9986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9987 * @param iSegReg The index of the segment register to use for
9988 * this access. The base and limits are checked.
9989 * @param GCPtrMem The address of the guest memory.
9990 * @param u32Value The value to store.
9991 */
9992IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9993{
9994 /* The lazy approach for now... */
9995 uint32_t *pu32Dst;
9996 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9997 if (rc == VINF_SUCCESS)
9998 {
9999 *pu32Dst = u32Value;
10000 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10001 }
10002 return rc;
10003}
10004
10005
10006#ifdef IEM_WITH_SETJMP
10007/**
10008 * Stores a data dword, longjmp on error.
10009 *
10011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10012 * @param iSegReg The index of the segment register to use for
10013 * this access. The base and limits are checked.
10014 * @param GCPtrMem The address of the guest memory.
10015 * @param u32Value The value to store.
10016 */
10017IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10018{
10019 /* The lazy approach for now... */
10020 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10021 *pu32Dst = u32Value;
10022 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10023}
10024#endif
10025
10026
10027/**
10028 * Stores a data qword.
10029 *
10030 * @returns Strict VBox status code.
10031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10032 * @param iSegReg The index of the segment register to use for
10033 * this access. The base and limits are checked.
10034 * @param GCPtrMem The address of the guest memory.
10035 * @param u64Value The value to store.
10036 */
10037IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10038{
10039 /* The lazy approach for now... */
10040 uint64_t *pu64Dst;
10041 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10042 if (rc == VINF_SUCCESS)
10043 {
10044 *pu64Dst = u64Value;
10045 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10046 }
10047 return rc;
10048}
10049
10050
10051#ifdef IEM_WITH_SETJMP
10052/**
10053 * Stores a data qword, longjmp on error.
10054 *
10055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10056 * @param iSegReg The index of the segment register to use for
10057 * this access. The base and limits are checked.
10058 * @param GCPtrMem The address of the guest memory.
10059 * @param u64Value The value to store.
10060 */
10061IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10062{
10063 /* The lazy approach for now... */
10064 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10065 *pu64Dst = u64Value;
10066 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10067}
10068#endif
10069
10070
10071/**
10072 * Stores a data dqword.
10073 *
10074 * @returns Strict VBox status code.
10075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10076 * @param iSegReg The index of the segment register to use for
10077 * this access. The base and limits are checked.
10078 * @param GCPtrMem The address of the guest memory.
10079 * @param u128Value The value to store.
10080 */
10081IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10082{
10083 /* The lazy approach for now... */
10084 PRTUINT128U pu128Dst;
10085 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10086 if (rc == VINF_SUCCESS)
10087 {
10088 pu128Dst->au64[0] = u128Value.au64[0];
10089 pu128Dst->au64[1] = u128Value.au64[1];
10090 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10091 }
10092 return rc;
10093}
10094
10095
10096#ifdef IEM_WITH_SETJMP
10097/**
10098 * Stores a data dqword, longjmp on error.
10099 *
10100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10101 * @param iSegReg The index of the segment register to use for
10102 * this access. The base and limits are checked.
10103 * @param GCPtrMem The address of the guest memory.
10104 * @param u128Value The value to store.
10105 */
10106IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10107{
10108 /* The lazy approach for now... */
10109 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10110 pu128Dst->au64[0] = u128Value.au64[0];
10111 pu128Dst->au64[1] = u128Value.au64[1];
10112 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10113}
10114#endif
10115
10116
10117/**
10118 * Stores a data dqword, SSE aligned.
10119 *
10120 * @returns Strict VBox status code.
10121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10122 * @param iSegReg The index of the segment register to use for
10123 * this access. The base and limits are checked.
10124 * @param GCPtrMem The address of the guest memory.
10125 * @param u128Value The value to store.
10126 */
10127IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10128{
10129 /* The lazy approach for now... */
10130 if ( (GCPtrMem & 15)
10131 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10132 return iemRaiseGeneralProtectionFault0(pVCpu);
10133
10134 PRTUINT128U pu128Dst;
10135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10136 if (rc == VINF_SUCCESS)
10137 {
10138 pu128Dst->au64[0] = u128Value.au64[0];
10139 pu128Dst->au64[1] = u128Value.au64[1];
10140 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10141 }
10142 return rc;
10143}
10144
10145
10146#ifdef IEM_WITH_SETJMP
10147/**
10148 * Stores a data dqword, SSE aligned, longjmp on error.
10149 *
10151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10152 * @param iSegReg The index of the segment register to use for
10153 * this access. The base and limits are checked.
10154 * @param GCPtrMem The address of the guest memory.
10155 * @param u128Value The value to store.
10156 */
10157DECL_NO_INLINE(IEM_STATIC, void)
10158iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10159{
10160 /* The lazy approach for now... */
10161 if ( (GCPtrMem & 15) == 0
10162 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10163 {
10164 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10165 pu128Dst->au64[0] = u128Value.au64[0];
10166 pu128Dst->au64[1] = u128Value.au64[1];
10167 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10168 return;
10169 }
10170
10171 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10172 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10173}
10174#endif
10175
10176
10177/**
10178 * Stores a data oword (octo word).
10179 *
10180 * @returns Strict VBox status code.
10181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10182 * @param iSegReg The index of the segment register to use for
10183 * this access. The base and limits are checked.
10184 * @param GCPtrMem The address of the guest memory.
10185 * @param pu256Value Pointer to the value to store.
10186 */
10187IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10188{
10189 /* The lazy approach for now... */
10190 PRTUINT256U pu256Dst;
10191 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10192 if (rc == VINF_SUCCESS)
10193 {
10194 pu256Dst->au64[0] = pu256Value->au64[0];
10195 pu256Dst->au64[1] = pu256Value->au64[1];
10196 pu256Dst->au64[2] = pu256Value->au64[2];
10197 pu256Dst->au64[3] = pu256Value->au64[3];
10198 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10199 }
10200 return rc;
10201}
10202
10203
10204#ifdef IEM_WITH_SETJMP
10205/**
10206 * Stores a data oword (octo word), longjmp on error.
10207 *
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 * @param pu256Value Pointer to the value to store.
10213 */
10214IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10215{
10216 /* The lazy approach for now... */
10217 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10218 pu256Dst->au64[0] = pu256Value->au64[0];
10219 pu256Dst->au64[1] = pu256Value->au64[1];
10220 pu256Dst->au64[2] = pu256Value->au64[2];
10221 pu256Dst->au64[3] = pu256Value->au64[3];
10222 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10223}
10224#endif
10225
10226
10227/**
10228 * Stores a data oword (octo word), AVX aligned.
10229 *
10230 * @returns Strict VBox status code.
10231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10232 * @param iSegReg The index of the segment register to use for
10233 * this access. The base and limits are checked.
10234 * @param GCPtrMem The address of the guest memory.
10235 * @param pu256Value Pointer to the value to store.
10236 */
10237IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10238{
10239 /* The lazy approach for now... */
10240 if (GCPtrMem & 31)
10241 return iemRaiseGeneralProtectionFault0(pVCpu);
10242
10243 PRTUINT256U pu256Dst;
10244 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10245 if (rc == VINF_SUCCESS)
10246 {
10247 pu256Dst->au64[0] = pu256Value->au64[0];
10248 pu256Dst->au64[1] = pu256Value->au64[1];
10249 pu256Dst->au64[2] = pu256Value->au64[2];
10250 pu256Dst->au64[3] = pu256Value->au64[3];
10251 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10252 }
10253 return rc;
10254}
10255
10256
10257#ifdef IEM_WITH_SETJMP
10258/**
10259 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10260 *
10262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10263 * @param iSegReg The index of the segment register to use for
10264 * this access. The base and limits are checked.
10265 * @param GCPtrMem The address of the guest memory.
10266 * @param pu256Value Pointer to the value to store.
10267 */
10268DECL_NO_INLINE(IEM_STATIC, void)
10269iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10270{
10271 /* The lazy approach for now... */
10272 if ((GCPtrMem & 31) == 0)
10273 {
10274 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10275 pu256Dst->au64[0] = pu256Value->au64[0];
10276 pu256Dst->au64[1] = pu256Value->au64[1];
10277 pu256Dst->au64[2] = pu256Value->au64[2];
10278 pu256Dst->au64[3] = pu256Value->au64[3];
10279 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10280 return;
10281 }
10282
10283 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10284 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10285}
10286#endif
10287
10288
10289/**
10290 * Stores a descriptor register (sgdt, sidt).
10291 *
10292 * @returns Strict VBox status code.
10293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10294 * @param cbLimit The limit.
10295 * @param GCPtrBase The base address.
10296 * @param iSegReg The index of the segment register to use for
10297 * this access. The base and limits are checked.
10298 * @param GCPtrMem The address of the guest memory.
10299 */
10300IEM_STATIC VBOXSTRICTRC
10301iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10302{
10303 /*
10304 * The SIDT and SGDT instructions actually store the data using two
10305 * independent writes.  The instructions do not respond to opsize prefixes.
10306 */
10307 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10308 if (rcStrict == VINF_SUCCESS)
10309 {
10310 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10311 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10312 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10313 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10314 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10315 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10316 else
10317 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10318 }
10319 return rcStrict;
10320}
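
/*
 * Note on the 286 special case above: 286-class CPUs only have a 24-bit
 * descriptor table base, and IEM follows the convention of filling the unused
 * top byte of the stored 32-bit base with 0xFF for those targets, hence the
 * UINT32_C(0xff000000) mask.
 */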
10321
10322
10323/**
10324 * Pushes a word onto the stack.
10325 *
10326 * @returns Strict VBox status code.
10327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10328 * @param u16Value The value to push.
10329 */
10330IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10331{
10332 /* Decrement the stack pointer. */
10333 uint64_t uNewRsp;
10334 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10335
10336 /* Write the word the lazy way. */
10337 uint16_t *pu16Dst;
10338 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10339 if (rc == VINF_SUCCESS)
10340 {
10341 *pu16Dst = u16Value;
10342 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10343 }
10344
10345 /* Commit the new RSP value unless an access handler made trouble. */
10346 if (rc == VINF_SUCCESS)
10347 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10348
10349 return rc;
10350}
10351
10352
10353/**
10354 * Pushes a dword onto the stack.
10355 *
10356 * @returns Strict VBox status code.
10357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10358 * @param u32Value The value to push.
10359 */
10360IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10361{
10362 /* Decrement the stack pointer. */
10363 uint64_t uNewRsp;
10364 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10365
10366 /* Write the dword the lazy way. */
10367 uint32_t *pu32Dst;
10368 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10369 if (rc == VINF_SUCCESS)
10370 {
10371 *pu32Dst = u32Value;
10372 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10373 }
10374
10375 /* Commit the new RSP value unless an access handler made trouble. */
10376 if (rc == VINF_SUCCESS)
10377 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10378
10379 return rc;
10380}
10381
10382
10383/**
10384 * Pushes a dword segment register value onto the stack.
10385 *
10386 * @returns Strict VBox status code.
10387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10388 * @param u32Value The value to push.
10389 */
10390IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10391{
10392 /* Decrement the stack pointer. */
10393 uint64_t uNewRsp;
10394 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10395
10396 /* The Intel docs talk about zero extending the selector register
10397 value. My actual Intel CPU here might be zero extending the value,
10398 but it still only writes the lower word... */
10399 /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
10400 * happens when crossing a page boundary: is the high word checked
10401 * for write accessibility or not?  Probably it is.  What about segment limits?
10402 * It appears this behavior is also shared with trap error codes.
10403 *
10404 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10405 * Check ancient hardware to see when it actually changed. */
10406 uint16_t *pu16Dst;
10407 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10408 if (rc == VINF_SUCCESS)
10409 {
10410 *pu16Dst = (uint16_t)u32Value;
10411 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10412 }
10413
10414 /* Commit the new RSP value unless an access handler made trouble. */
10415 if (rc == VINF_SUCCESS)
10416 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10417
10418 return rc;
10419}
10420
10421
10422/**
10423 * Pushes a qword onto the stack.
10424 *
10425 * @returns Strict VBox status code.
10426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10427 * @param u64Value The value to push.
10428 */
10429IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10430{
10431 /* Decrement the stack pointer. */
10432 uint64_t uNewRsp;
10433 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10434
10435 /* Write the qword the lazy way. */
10436 uint64_t *pu64Dst;
10437 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10438 if (rc == VINF_SUCCESS)
10439 {
10440 *pu64Dst = u64Value;
10441 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10442 }
10443
10444 /* Commit the new RSP value unless an access handler made trouble. */
10445 if (rc == VINF_SUCCESS)
10446 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10447
10448 return rc;
10449}
10450
10451
10452/**
10453 * Pops a word from the stack.
10454 *
10455 * @returns Strict VBox status code.
10456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10457 * @param pu16Value Where to store the popped value.
10458 */
10459IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10460{
10461 /* Increment the stack pointer. */
10462 uint64_t uNewRsp;
10463 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10464
10465 /* Read the word the lazy way. */
10466 uint16_t const *pu16Src;
10467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10468 if (rc == VINF_SUCCESS)
10469 {
10470 *pu16Value = *pu16Src;
10471 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10472
10473 /* Commit the new RSP value. */
10474 if (rc == VINF_SUCCESS)
10475 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10476 }
10477
10478 return rc;
10479}
10480
10481
10482/**
10483 * Pops a dword from the stack.
10484 *
10485 * @returns Strict VBox status code.
10486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10487 * @param pu32Value Where to store the popped value.
10488 */
10489IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10490{
10491 /* Increment the stack pointer. */
10492 uint64_t uNewRsp;
10493 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10494
10495 /* Read the dword the lazy way. */
10496 uint32_t const *pu32Src;
10497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10498 if (rc == VINF_SUCCESS)
10499 {
10500 *pu32Value = *pu32Src;
10501 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10502
10503 /* Commit the new RSP value. */
10504 if (rc == VINF_SUCCESS)
10505 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10506 }
10507
10508 return rc;
10509}
10510
10511
10512/**
10513 * Pops a qword from the stack.
10514 *
10515 * @returns Strict VBox status code.
10516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10517 * @param pu64Value Where to store the popped value.
10518 */
10519IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10520{
10521 /* Increment the stack pointer. */
10522 uint64_t uNewRsp;
10523 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10524
10525 /* Read the qword the lazy way. */
10526 uint64_t const *pu64Src;
10527 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10528 if (rc == VINF_SUCCESS)
10529 {
10530 *pu64Value = *pu64Src;
10531 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10532
10533 /* Commit the new RSP value. */
10534 if (rc == VINF_SUCCESS)
10535 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10536 }
10537
10538 return rc;
10539}
10540
10541
10542/**
10543 * Pushes a word onto the stack, using a temporary stack pointer.
10544 *
10545 * @returns Strict VBox status code.
10546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10547 * @param u16Value The value to push.
10548 * @param pTmpRsp Pointer to the temporary stack pointer.
10549 */
10550IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10551{
10552 /* Decrement the stack pointer. */
10553 RTUINT64U NewRsp = *pTmpRsp;
10554 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10555
10556 /* Write the word the lazy way. */
10557 uint16_t *pu16Dst;
10558 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10559 if (rc == VINF_SUCCESS)
10560 {
10561 *pu16Dst = u16Value;
10562 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10563 }
10564
10565 /* Commit the new RSP value unless an access handler made trouble. */
10566 if (rc == VINF_SUCCESS)
10567 *pTmpRsp = NewRsp;
10568
10569 return rc;
10570}
10571
10572
10573/**
10574 * Pushes a dword onto the stack, using a temporary stack pointer.
10575 *
10576 * @returns Strict VBox status code.
10577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10578 * @param u32Value The value to push.
10579 * @param pTmpRsp Pointer to the temporary stack pointer.
10580 */
10581IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10582{
10583 /* Decrement the stack pointer. */
10584 RTUINT64U NewRsp = *pTmpRsp;
10585 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10586
10587 /* Write the dword the lazy way. */
10588 uint32_t *pu32Dst;
10589 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10590 if (rc == VINF_SUCCESS)
10591 {
10592 *pu32Dst = u32Value;
10593 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10594 }
10595
10596 /* Commit the new RSP value unless an access handler made trouble. */
10597 if (rc == VINF_SUCCESS)
10598 *pTmpRsp = NewRsp;
10599
10600 return rc;
10601}
10602
10603
10604/**
10605 * Pushes a qword onto the stack, using a temporary stack pointer.
10606 *
10607 * @returns Strict VBox status code.
10608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10609 * @param u64Value The value to push.
10610 * @param pTmpRsp Pointer to the temporary stack pointer.
10611 */
10612IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10613{
10614 /* Decrement the stack pointer. */
10615 RTUINT64U NewRsp = *pTmpRsp;
10616 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10617
10618 /* Write the qword the lazy way. */
10619 uint64_t *pu64Dst;
10620 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10621 if (rc == VINF_SUCCESS)
10622 {
10623 *pu64Dst = u64Value;
10624 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10625 }
10626
10627 /* Commit the new RSP value unless an access handler made trouble. */
10628 if (rc == VINF_SUCCESS)
10629 *pTmpRsp = NewRsp;
10630
10631 return rc;
10632}
10633
10634
10635/**
10636 * Pops a word from the stack, using a temporary stack pointer.
10637 *
10638 * @returns Strict VBox status code.
10639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10640 * @param pu16Value Where to store the popped value.
10641 * @param pTmpRsp Pointer to the temporary stack pointer.
10642 */
10643IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10644{
10645 /* Increment the stack pointer. */
10646 RTUINT64U NewRsp = *pTmpRsp;
10647 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10648
10649 /* Read the word the lazy way. */
10650 uint16_t const *pu16Src;
10651 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10652 if (rc == VINF_SUCCESS)
10653 {
10654 *pu16Value = *pu16Src;
10655 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10656
10657 /* Commit the new RSP value. */
10658 if (rc == VINF_SUCCESS)
10659 *pTmpRsp = NewRsp;
10660 }
10661
10662 return rc;
10663}
10664
10665
10666/**
10667 * Pops a dword from the stack, using a temporary stack pointer.
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10671 * @param pu32Value Where to store the popped value.
10672 * @param pTmpRsp Pointer to the temporary stack pointer.
10673 */
10674IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10675{
10676 /* Increment the stack pointer. */
10677 RTUINT64U NewRsp = *pTmpRsp;
10678 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10679
10680 /* Read the dword the lazy way. */
10681 uint32_t const *pu32Src;
10682 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10683 if (rc == VINF_SUCCESS)
10684 {
10685 *pu32Value = *pu32Src;
10686 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10687
10688 /* Commit the new RSP value. */
10689 if (rc == VINF_SUCCESS)
10690 *pTmpRsp = NewRsp;
10691 }
10692
10693 return rc;
10694}
10695
10696
10697/**
10698 * Pops a qword from the stack, using a temporary stack pointer.
10699 *
10700 * @returns Strict VBox status code.
10701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10702 * @param pu64Value Where to store the popped value.
10703 * @param pTmpRsp Pointer to the temporary stack pointer.
10704 */
10705IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10706{
10707 /* Increment the stack pointer. */
10708 RTUINT64U NewRsp = *pTmpRsp;
10709 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10710
10711 /* Read the qword the lazy way. */
10712 uint64_t const *pu64Src;
10713 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10714 if (rcStrict == VINF_SUCCESS)
10715 {
10716 *pu64Value = *pu64Src;
10717 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10718
10719 /* Commit the new RSP value. */
10720 if (rcStrict == VINF_SUCCESS)
10721 *pTmpRsp = NewRsp;
10722 }
10723
10724 return rcStrict;
10725}
10726
10727
10728/**
10729 * Begins a special stack push (used by interrupts, exceptions and such).
10730 *
10731 * This will raise \#SS or \#PF if appropriate.
10732 *
10733 * @returns Strict VBox status code.
10734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10735 * @param cbMem The number of bytes to push onto the stack.
10736 * @param ppvMem Where to return the pointer to the stack memory.
10737 * As with the other memory functions this could be
10738 * direct access or bounce buffered access, so
10739 * don't commit the register until the commit call
10740 * succeeds.
10741 * @param puNewRsp Where to return the new RSP value. This must be
10742 * passed unchanged to
10743 * iemMemStackPushCommitSpecial().
10744 */
10745IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10746{
10747 Assert(cbMem < UINT8_MAX);
10748 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10749 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10750}
10751
10752
10753/**
10754 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10755 *
10756 * This will update the rSP.
10757 *
10758 * @returns Strict VBox status code.
10759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10760 * @param pvMem The pointer returned by
10761 * iemMemStackPushBeginSpecial().
10762 * @param uNewRsp The new RSP value returned by
10763 * iemMemStackPushBeginSpecial().
10764 */
10765IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10766{
10767 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10768 if (rcStrict == VINF_SUCCESS)
10769 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10770 return rcStrict;
10771}
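
/*
 * Usage sketch: how the special push pair is intended to be used; map the
 * stack bytes, fill them, then commit the buffer and RSP in one go.  The
 * function name below is hypothetical, for illustration only.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleSpecialPushU64(PVMCPU pVCpu, uint64_t uValue)
{
    uint64_t  uNewRsp;
    uint64_t *pu64Dst;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uValue), (void **)&pu64Dst, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu64Dst = uValue;              /* may be a bounce buffer, so no guest state is touched yet */
    return iemMemStackPushCommitSpecial(pVCpu, pu64Dst, uNewRsp); /* commits the bytes and updates RSP */
}
#endif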
10772
10773
10774/**
10775 * Begin a special stack pop (used by iret, retf and such).
10776 *
10777 * This will raise \#SS or \#PF if appropriate.
10778 *
10779 * @returns Strict VBox status code.
10780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10781 * @param cbMem The number of bytes to pop from the stack.
10782 * @param ppvMem Where to return the pointer to the stack memory.
10783 * @param puNewRsp Where to return the new RSP value. This must be
10784 * assigned to CPUMCTX::rsp manually some time
10785 * after iemMemStackPopDoneSpecial() has been
10786 * called.
10787 */
10788IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10789{
10790 Assert(cbMem < UINT8_MAX);
10791 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10792 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10793}
10794
10795
10796/**
10797 * Continue a special stack pop (used by iret and retf).
10798 *
10799 * This will raise \#SS or \#PF if appropriate.
10800 *
10801 * @returns Strict VBox status code.
10802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10803 * @param cbMem The number of bytes to pop from the stack.
10804 * @param ppvMem Where to return the pointer to the stack memory.
10805 * @param puNewRsp Where to return the new RSP value. This must be
10806 * assigned to CPUMCTX::rsp manually some time
10807 * after iemMemStackPopDoneSpecial() has been
10808 * called.
10809 */
10810IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10811{
10812 Assert(cbMem < UINT8_MAX);
10813 RTUINT64U NewRsp;
10814 NewRsp.u = *puNewRsp;
10815 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10816 *puNewRsp = NewRsp.u;
10817 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10818}
10819
10820
10821/**
10822 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10823 * iemMemStackPopContinueSpecial).
10824 *
10825 * The caller will manually commit the rSP.
10826 *
10827 * @returns Strict VBox status code.
10828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10829 * @param pvMem The pointer returned by
10830 * iemMemStackPopBeginSpecial() or
10831 * iemMemStackPopContinueSpecial().
10832 */
10833IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10834{
10835 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10836}
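
/*
 * Usage sketch: the special pop sequence is begin -> read -> done, with the
 * caller committing RSP manually afterwards as documented above.  The function
 * name below is hypothetical, for illustration only.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleSpecialPopU64(PVMCPU pVCpu, uint64_t *puValue)
{
    uint64_t        uNewRsp;
    uint64_t const *pu64Src;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(*puValue), (void const **)&pu64Src, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puValue = *pu64Src;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Src);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* manual RSP commit, typically after all other checks have passed */
    return rcStrict;
}
#endif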
10837
10838
10839/**
10840 * Fetches a system table byte.
10841 *
10842 * @returns Strict VBox status code.
10843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10844 * @param pbDst Where to return the byte.
10845 * @param iSegReg The index of the segment register to use for
10846 * this access. The base and limits are checked.
10847 * @param GCPtrMem The address of the guest memory.
10848 */
10849IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10850{
10851 /* The lazy approach for now... */
10852 uint8_t const *pbSrc;
10853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10854 if (rc == VINF_SUCCESS)
10855 {
10856 *pbDst = *pbSrc;
10857 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10858 }
10859 return rc;
10860}
10861
10862
10863/**
10864 * Fetches a system table word.
10865 *
10866 * @returns Strict VBox status code.
10867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10868 * @param pu16Dst Where to return the word.
10869 * @param iSegReg The index of the segment register to use for
10870 * this access. The base and limits are checked.
10871 * @param GCPtrMem The address of the guest memory.
10872 */
10873IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10874{
10875 /* The lazy approach for now... */
10876 uint16_t const *pu16Src;
10877 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10878 if (rc == VINF_SUCCESS)
10879 {
10880 *pu16Dst = *pu16Src;
10881 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10882 }
10883 return rc;
10884}
10885
10886
10887/**
10888 * Fetches a system table dword.
10889 *
10890 * @returns Strict VBox status code.
10891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10892 * @param pu32Dst Where to return the dword.
10893 * @param iSegReg The index of the segment register to use for
10894 * this access. The base and limits are checked.
10895 * @param GCPtrMem The address of the guest memory.
10896 */
10897IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10898{
10899 /* The lazy approach for now... */
10900 uint32_t const *pu32Src;
10901 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10902 if (rc == VINF_SUCCESS)
10903 {
10904 *pu32Dst = *pu32Src;
10905 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10906 }
10907 return rc;
10908}
10909
10910
10911/**
10912 * Fetches a system table qword.
10913 *
10914 * @returns Strict VBox status code.
10915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10916 * @param pu64Dst Where to return the qword.
10917 * @param iSegReg The index of the segment register to use for
10918 * this access. The base and limits are checked.
10919 * @param GCPtrMem The address of the guest memory.
10920 */
10921IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10922{
10923 /* The lazy approach for now... */
10924 uint64_t const *pu64Src;
10925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10926 if (rc == VINF_SUCCESS)
10927 {
10928 *pu64Dst = *pu64Src;
10929 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10930 }
10931 return rc;
10932}
10933
10934
10935/**
10936 * Fetches a descriptor table entry with caller specified error code.
10937 *
10938 * @returns Strict VBox status code.
10939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10940 * @param pDesc Where to return the descriptor table entry.
10941 * @param uSel The selector whose table entry to fetch.
10942 * @param uXcpt The exception to raise on table lookup error.
10943 * @param uErrorCode The error code associated with the exception.
10944 */
10945IEM_STATIC VBOXSTRICTRC
10946iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10947{
10948 AssertPtr(pDesc);
10949 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10950
10951 /** @todo did the 286 require all 8 bytes to be accessible? */
10952 /*
10953 * Get the selector table base and check bounds.
10954 */
10955 RTGCPTR GCPtrBase;
10956 if (uSel & X86_SEL_LDT)
10957 {
10958 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10959 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10960 {
10961 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10962 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10963 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10964 uErrorCode, 0);
10965 }
10966
10967 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10968 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10969 }
10970 else
10971 {
10972 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10973 {
10974 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10975 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10976 uErrorCode, 0);
10977 }
10978 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10979 }
10980
10981 /*
10982 * Read the legacy descriptor and maybe the long mode extensions if
10983 * required.
10984 */
10985 VBOXSTRICTRC rcStrict;
10986 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10987 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10988 else
10989 {
10990 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10991 if (rcStrict == VINF_SUCCESS)
10992 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10993 if (rcStrict == VINF_SUCCESS)
10994 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10995 if (rcStrict == VINF_SUCCESS)
10996 pDesc->Legacy.au16[3] = 0;
10997 else
10998 return rcStrict;
10999 }
11000
11001 if (rcStrict == VINF_SUCCESS)
11002 {
11003 if ( !IEM_IS_LONG_MODE(pVCpu)
11004 || pDesc->Legacy.Gen.u1DescType)
11005 pDesc->Long.au64[1] = 0;
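    /* Note: X86_SEL_RPL_LDT covers the low three selector bits, so
       (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, i.e. the
       offset of the high 8 bytes of the 16-byte long mode descriptor. */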
11006 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11007 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11008 else
11009 {
11010 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11011 /** @todo is this the right exception? */
11012 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11013 }
11014 }
11015 return rcStrict;
11016}
11017
11018
11019/**
11020 * Fetches a descriptor table entry.
11021 *
11022 * @returns Strict VBox status code.
11023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11024 * @param pDesc Where to return the descriptor table entry.
11025 * @param uSel The selector whose table entry to fetch.
11026 * @param uXcpt The exception to raise on table lookup error.
11027 */
11028IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11029{
11030 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11031}
11032
11033
11034/**
11035 * Fakes a long mode stack selector for SS = 0.
11036 *
11037 * @param pDescSs Where to return the fake stack descriptor.
11038 * @param uDpl The DPL we want.
11039 */
11040IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11041{
11042 pDescSs->Long.au64[0] = 0;
11043 pDescSs->Long.au64[1] = 0;
11044 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11045 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11046 pDescSs->Long.Gen.u2Dpl = uDpl;
11047 pDescSs->Long.Gen.u1Present = 1;
11048 pDescSs->Long.Gen.u1Long = 1;
11049}
11050
11051
11052/**
11053 * Marks the selector descriptor as accessed (only non-system descriptors).
11054 *
11055 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11056 * will therefore skip the limit checks.
11057 *
11058 * @returns Strict VBox status code.
11059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11060 * @param uSel The selector.
11061 */
11062IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11063{
11064 /*
11065 * Get the selector table base and calculate the entry address.
11066 */
11067 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11068 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11069 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11070 GCPtr += uSel & X86_SEL_MASK;
11071
11072 /*
11073 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11074 * ugly stuff to avoid this. This will make sure the access is atomic and
11075 * more or less removes any question about 8-bit vs 32-bit accesses.
11076 */
11077 VBOXSTRICTRC rcStrict;
11078 uint32_t volatile *pu32;
11079 if ((GCPtr & 3) == 0)
11080 {
11081 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11082 GCPtr += 2 + 2;
11083 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11084 if (rcStrict != VINF_SUCCESS)
11085 return rcStrict;
11086 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11087 }
11088 else
11089 {
11090 /* The misaligned GDT/LDT case, map the whole thing. */
11091 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11092 if (rcStrict != VINF_SUCCESS)
11093 return rcStrict;
11094 switch ((uintptr_t)pu32 & 3)
11095 {
11096 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11097 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11098 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11099 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11100 }
11101 }
11102
11103 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11104}
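
/*
 * Worked example for the aligned path in iemMemMarkSelDescAccessed above: the
 * accessed flag is bit 40 of the 8-byte descriptor, i.e. bit 0 of the access
 * byte at offset 5.  Mapping four bytes at GCPtr + 4 therefore puts that flag
 * at byte 1, bit 0 of the mapped dword, which is bit 8; hence
 * ASMAtomicBitSet(pu32, 8).
 */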
11105
11106/** @} */
11107
11108
11109/*
11110 * Include the C/C++ implementation of instruction.
11111 */
11112#include "IEMAllCImpl.cpp.h"
11113
11114
11115
11116/** @name "Microcode" macros.
11117 *
11118 * The idea is that we should be able to use the same code to interpret
11119 * instructions and to generate code for a recompiler. Thus this obfuscation.
11120 *
11121 * @{
11122 */
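/*
 * For illustration: an instruction body built from these macros reads like
 * straight-line C.  A hypothetical 16-bit "add one to AX, ignore EFLAGS"
 * sequence could look like this (X86_GREG_xAX comes from the x86 headers):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_ADD_GREG_U16(X86_GREG_xAX, 1);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * The same sequence could later be given a different meaning, e.g. emitting
 * recompiled code, simply by redefining the macros.
 */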
11123#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11124#define IEM_MC_END() }
11125#define IEM_MC_PAUSE() do {} while (0)
11126#define IEM_MC_CONTINUE() do {} while (0)
11127
11128/** Internal macro. */
11129#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11130 do \
11131 { \
11132 VBOXSTRICTRC rcStrict2 = a_Expr; \
11133 if (rcStrict2 != VINF_SUCCESS) \
11134 return rcStrict2; \
11135 } while (0)
11136
11137
11138#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11139#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11140#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11141#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11142#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11143#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11144#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11145#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11146#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11147 do { \
11148 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11149 return iemRaiseDeviceNotAvailable(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11152 do { \
11153 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11154 return iemRaiseDeviceNotAvailable(pVCpu); \
11155 } while (0)
11156#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11157 do { \
11158 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11159 return iemRaiseMathFault(pVCpu); \
11160 } while (0)
11161#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11162 do { \
11163 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11164 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11165 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11166 return iemRaiseUndefinedOpcode(pVCpu); \
11167 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11168 return iemRaiseDeviceNotAvailable(pVCpu); \
11169 } while (0)
11170#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11171 do { \
11172 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11173 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11174 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11175 return iemRaiseUndefinedOpcode(pVCpu); \
11176 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11177 return iemRaiseDeviceNotAvailable(pVCpu); \
11178 } while (0)
11179#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11180 do { \
11181 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11182 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11183 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11184 return iemRaiseUndefinedOpcode(pVCpu); \
11185 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11186 return iemRaiseDeviceNotAvailable(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11189 do { \
11190 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11191 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11192 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11195 return iemRaiseDeviceNotAvailable(pVCpu); \
11196 } while (0)
11197#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11198 do { \
11199 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11200 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11201 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11202 return iemRaiseUndefinedOpcode(pVCpu); \
11203 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11204 return iemRaiseDeviceNotAvailable(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11207 do { \
11208 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11209 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11210 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11213 return iemRaiseDeviceNotAvailable(pVCpu); \
11214 } while (0)
11215#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11216 do { \
11217 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11218 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11219 return iemRaiseUndefinedOpcode(pVCpu); \
11220 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11221 return iemRaiseDeviceNotAvailable(pVCpu); \
11222 } while (0)
11223#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11224 do { \
11225 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11226 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11227 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11228 return iemRaiseUndefinedOpcode(pVCpu); \
11229 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11230 return iemRaiseDeviceNotAvailable(pVCpu); \
11231 } while (0)
11232#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11233 do { \
11234 if (pVCpu->iem.s.uCpl != 0) \
11235 return iemRaiseGeneralProtectionFault0(pVCpu); \
11236 } while (0)
11237#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11238 do { \
11239 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11240 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11241 } while (0)
11242#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11243 do { \
11244 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11245 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11246 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11247 return iemRaiseUndefinedOpcode(pVCpu); \
11248 } while (0)
11249#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11250 do { \
11251 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11252 return iemRaiseGeneralProtectionFault0(pVCpu); \
11253 } while (0)
11254
11255
11256#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11257#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11258#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11259#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11260#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11261#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11262#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11263 uint32_t a_Name; \
11264 uint32_t *a_pName = &a_Name
11265#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11266 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11267
11268#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11269#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11270
11271#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11272#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11273#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11274#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11275#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11276#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11277#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11278#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11279#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11280#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11281#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11282#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11283#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11284#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11285#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11286#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11287#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11288#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11289 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11290 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11291 } while (0)
11292#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11293 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11294 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11295 } while (0)
11296#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11297 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11298 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11299 } while (0)
11300/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11301#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11302 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11303 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11304 } while (0)
11305#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11306 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11307 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11308 } while (0)
11309/** @note Not for IOPL or IF testing or modification. */
11310#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11311#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11312#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11313#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11314
11315#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11316#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11317#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11318#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11319#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11320#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11321#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11322#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11323#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11324#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11325/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11326#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11327 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11328 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11329 } while (0)
11330#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11331 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11332 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11333 } while (0)
11334#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11335 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11336
11337
11338#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11339#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11340/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11341 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11342#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11343#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11344/** @note Not for IOPL or IF testing or modification. */
11345#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
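
/*
 * Illustration of the @todo above: a 32-bit register reference must be
 * followed by an explicit clearing of the upper half when the value is
 * committed, along the lines of:
 *
 *      IEM_MC_REF_GREG_U32(pu32Dst, iGReg);
 *      ... update *pu32Dst ...
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */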
11346
11347#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11348#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11349#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11350 do { \
11351 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11352 *pu32Reg += (a_u32Value); \
11353 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11354 } while (0)
11355#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11356
11357#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11358#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11359#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11360 do { \
11361 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11362 *pu32Reg -= (a_u32Value); \
11363 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11364 } while (0)
11365#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11366#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11367
11368#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11369#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11370#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11371#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11372#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11373#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11374#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11375
11376#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11377#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11378#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11379#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11380
11381#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11382#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11383#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11384
11385#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11386#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11387#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11388
11389#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11390#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11391#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11392
11393#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11394#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11395#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11396
11397#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11398
11399#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11400
11401#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11402#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11403#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11404 do { \
11405 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11406 *pu32Reg &= (a_u32Value); \
11407 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11408 } while (0)
11409#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11410
11411#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11412#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11413#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11414 do { \
11415 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11416 *pu32Reg |= (a_u32Value); \
11417 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11418 } while (0)
11419#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11420
11421
11422/** @note Not for IOPL or IF modification. */
11423#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11424/** @note Not for IOPL or IF modification. */
11425#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11426/** @note Not for IOPL or IF modification. */
11427#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11428
11429#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11430
11431/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11432#define IEM_MC_FPU_TO_MMX_MODE() do { \
11433 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11434 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11435 } while (0)
11436
11437/** Switches the FPU state from MMX mode (all tags empty, i.e. abridged FTW=0). */
11438#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11439 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11440 } while (0)
11441
11442#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11443 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11444#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11445 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11446#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11447 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11448 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11449 } while (0)
11450#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11451 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11452 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11453 } while (0)
11454#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11455 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11456#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11457 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11458#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11459 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11460
11461#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11462 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11463 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11464 } while (0)
11465#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11466 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11467#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11468 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11469#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11470 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11471#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11472 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11473 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11474 } while (0)
11475#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11476 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11477#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11478 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11479 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11480 } while (0)
11481#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11482 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11483#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11484 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11485 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11486 } while (0)
11487#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11488 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11489#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11490 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11491#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11492 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11493#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11494 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11495#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11496 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11497 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11498 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11499 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11500 } while (0)
11501
11502#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11503 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11504 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11505 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11506 } while (0)
11507#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11508 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11509 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11510 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11511 } while (0)
11512#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11513 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11514 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11515 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11516 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11517 } while (0)
11518#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11519 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11520 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11521 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11522 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11523 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11524 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11525 } while (0)
11526
11527#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11528#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11529 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11530 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11533 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11535 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11536 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11537 } while (0)
11538#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11539 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11540 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11546 } while (0)
11547#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11555 } while (0)
11556#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11557 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11558 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11559 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11560 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11561 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11562 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11563 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11564 } while (0)
11565
11566#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11567 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11568#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11569 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11570#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11571 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11572#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11573 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11574 uintptr_t const iYRegTmp = (a_iYReg); \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11577 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11578 } while (0)
11579
11580#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11581 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11588 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11589 } while (0)
11590#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11591 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11592 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11593 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11599 } while (0)
11600#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11601 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11602 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11603 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11604 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11605 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11606 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11607 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11608 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11609 } while (0)
11610
11611#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11612 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11613 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11614 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11615 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11622 } while (0)
11623#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11624 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11625 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11626 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11627 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11632 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11633 } while (0)
11634#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11635 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11636 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11637 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11638 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11641 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11643 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11644 } while (0)
11645#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11646 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11647 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11648 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11649 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11650 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11651 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11652 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11653 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11654 } while (0)
11655
11656#ifndef IEM_WITH_SETJMP
11657# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11659# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11661# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11663#else
11664# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11665 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11667 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11668# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11669 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11670#endif
11671
11672#ifndef IEM_WITH_SETJMP
11673# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11675# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11677# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11679#else
11680# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11683 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11684# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11685 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11686#endif
11687
11688#ifndef IEM_WITH_SETJMP
11689# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11691# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11693# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11695#else
11696# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11697 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11699 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11700# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11701 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11702#endif
11703
11704#ifdef SOME_UNUSED_FUNCTION
11705# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11707#endif
11708
11709#ifndef IEM_WITH_SETJMP
11710# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11714# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11716# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11718#else
11719# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11722 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11723# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11724 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11726 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11727#endif
11728
11729#ifndef IEM_WITH_SETJMP
11730# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11734# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11736#else
11737# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11738 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11739# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11740 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11742 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11743#endif
11744
11745#ifndef IEM_WITH_SETJMP
11746# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11750#else
11751# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11752 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11753# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11754 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11755#endif
11756
11757#ifndef IEM_WITH_SETJMP
11758# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11762#else
11763# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11764 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11765# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11766 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11767#endif
11768
11769
11770
11771#ifndef IEM_WITH_SETJMP
11772# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11773 do { \
11774 uint8_t u8Tmp; \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11776 (a_u16Dst) = u8Tmp; \
11777 } while (0)
11778# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11779 do { \
11780 uint8_t u8Tmp; \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11782 (a_u32Dst) = u8Tmp; \
11783 } while (0)
11784# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11785 do { \
11786 uint8_t u8Tmp; \
11787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11788 (a_u64Dst) = u8Tmp; \
11789 } while (0)
11790# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11791 do { \
11792 uint16_t u16Tmp; \
11793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11794 (a_u32Dst) = u16Tmp; \
11795 } while (0)
11796# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11797 do { \
11798 uint16_t u16Tmp; \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11800 (a_u64Dst) = u16Tmp; \
11801 } while (0)
11802# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint32_t u32Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u64Dst) = u32Tmp; \
11807 } while (0)
11808#else /* IEM_WITH_SETJMP */
11809# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11813# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11814 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11815# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11816 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11817# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11819# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11820 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11821#endif /* IEM_WITH_SETJMP */
11822
11823#ifndef IEM_WITH_SETJMP
11824# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11825 do { \
11826 uint8_t u8Tmp; \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11828 (a_u16Dst) = (int8_t)u8Tmp; \
11829 } while (0)
11830# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11831 do { \
11832 uint8_t u8Tmp; \
11833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11834 (a_u32Dst) = (int8_t)u8Tmp; \
11835 } while (0)
11836# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11837 do { \
11838 uint8_t u8Tmp; \
11839 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11840 (a_u64Dst) = (int8_t)u8Tmp; \
11841 } while (0)
11842# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11843 do { \
11844 uint16_t u16Tmp; \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11846 (a_u32Dst) = (int16_t)u16Tmp; \
11847 } while (0)
11848# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11849 do { \
11850 uint16_t u16Tmp; \
11851 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11852 (a_u64Dst) = (int16_t)u16Tmp; \
11853 } while (0)
11854# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11855 do { \
11856 uint32_t u32Tmp; \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11858 (a_u64Dst) = (int32_t)u32Tmp; \
11859 } while (0)
11860#else /* IEM_WITH_SETJMP */
11861# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11862 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11863# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11864 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11865# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11866 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11867# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11868 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11869# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11870 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11871# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11872 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11873#endif /* IEM_WITH_SETJMP */
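/*
 * Illustrative sketch (not an opcode worker from this file): the extending
 * fetch macros above are typically combined with the register store and RIP
 * helpers defined earlier in this file (IEM_MC_BEGIN, IEM_MC_LOCAL,
 * IEM_MC_STORE_GREG_U32, IEM_MC_ADVANCE_RIP, IEM_MC_END) and with
 * IEM_MC_CALC_RM_EFF_ADDR defined further down.  In the IEM_WITH_SETJMP build
 * they longjmp on failure instead of returning a status code.  A MOVZX
 * Gv,Eb style memory form would look roughly like this:
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * The _SX_ variants are used the same way and differ only in sign extension.
 */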
11874
11875#ifndef IEM_WITH_SETJMP
11876# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11878# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11880# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11882# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11884#else
11885# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11886 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11887# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11888 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11889# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11890 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11891# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11892 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11893#endif
11894
11895#ifndef IEM_WITH_SETJMP
11896# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11898# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11900# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11902# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11904#else
11905# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11906 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11907# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11908 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11909# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11910 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11911# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11912 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11913#endif
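/*
 * Illustrative sketch (hypothetical, mirroring a MOV Eb,Ib style memory form;
 * IEM_OPCODE_GET_NEXT_U8 and the IEM_MC_BEGIN/LOCAL/ADVANCE_RIP/END helpers
 * are defined elsewhere in this file):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
 *      uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 *      IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u8Imm);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */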
11914
11915#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11916#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11917#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11918#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11919#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11920#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11921#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11922 do { \
11923 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11924 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11925 } while (0)
11926
11927#ifndef IEM_WITH_SETJMP
11928# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11929 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11930# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11932#else
11933# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11934 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11935# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11936 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11937#endif
11938
11939#ifndef IEM_WITH_SETJMP
11940# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11941 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11942# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11943 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11944#else
11945# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11946 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11947# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11948 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11949#endif
11950
11951
11952#define IEM_MC_PUSH_U16(a_u16Value) \
11953 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11954#define IEM_MC_PUSH_U32(a_u32Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11956#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11958#define IEM_MC_PUSH_U64(a_u64Value) \
11959 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11960
11961#define IEM_MC_POP_U16(a_pu16Value) \
11962 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11963#define IEM_MC_POP_U32(a_pu32Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11965#define IEM_MC_POP_U64(a_pu64Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11967
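/*
 * Illustrative sketch: the stack macros wrap the iemMemStackPush/Pop helpers,
 * which take care of SS and the stack pointer.  A 16-bit "push Ev" register
 * form would look roughly like this (IEM_MC_BEGIN, IEM_MC_LOCAL and
 * IEM_MC_FETCH_GREG_U16 are the helpers defined earlier in this file):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */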
11968/** Maps guest memory for direct or bounce buffered access.
11969 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11970 * @remarks May return.
11971 */
11972#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11974
11975/** Maps guest memory for direct or bounce buffered access.
11976 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11977 * @remarks May return.
11978 */
11979#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11981
11982/** Commits the memory and unmaps the guest memory.
11983 * @remarks May return.
11984 */
11985#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11987
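/*
 * Illustrative sketch: a read-modify-write memory operand is mapped, modified
 * through the returned pointer and then committed; when the access had to be
 * bounce buffered it is the commit that writes the data back.  Roughly
 * (IEM_MC_ARG, IEM_MC_ARG_LOCAL_EFLAGS, IEM_MC_FETCH_GREG_U32,
 * IEM_ACCESS_DATA_RW and the iemAImpl_add_u32 worker are assumptions from
 * elsewhere in the IEM sources):
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint32_t *,   pu32Dst,          0);
 *      IEM_MC_ARG(uint32_t,     u32Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags,  2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * after which EFLAGS are committed and RIP advanced as usual.
 */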
11988/** Commits the memory and unmaps the guest memory unless the FPU status word
11989 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11990 * would cause FLD not to store.
11991 *
11992 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11993 * store, while \#P will not.
11994 *
11995 * @remarks May in theory return - for now.
11996 */
11997#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11998 do { \
11999 if ( !(a_u16FSW & X86_FSW_ES) \
12000 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12001 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12002 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12003 } while (0)
12004
12005/** Calculate effective address from R/M. */
12006#ifndef IEM_WITH_SETJMP
12007# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12008 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12009#else
12010# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12011 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12012#endif
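/*
 * Illustrative note: the a_cbImm / cbImm argument matters for RIP relative
 * addressing in 64-bit mode because the displacement is relative to the end
 * of the whole instruction.  A memory form followed by a byte immediate would
 * therefore pass 1, e.g. (IEM_OPCODE_GET_NEXT_U8 is the opcode fetch helper
 * used further down in this file):
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
 *      uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
 */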
12013
12014#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12015#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12016#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12017#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12018#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12019#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12020#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12021
12022/**
12023 * Defers the rest of the instruction emulation to a C implementation routine
12024 * and returns, only taking the standard parameters.
12025 *
12026 * @param a_pfnCImpl The pointer to the C routine.
12027 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12028 */
12029#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12030
12031/**
12032 * Defers the rest of instruction emulation to a C implementation routine and
12033 * returns, taking one argument in addition to the standard ones.
12034 *
12035 * @param a_pfnCImpl The pointer to the C routine.
12036 * @param a0 The argument.
12037 */
12038#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12039
12040/**
12041 * Defers the rest of the instruction emulation to a C implementation routine
12042 * and returns, taking two arguments in addition to the standard ones.
12043 *
12044 * @param a_pfnCImpl The pointer to the C routine.
12045 * @param a0 The first extra argument.
12046 * @param a1 The second extra argument.
12047 */
12048#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12049
12050/**
12051 * Defers the rest of the instruction emulation to a C implementation routine
12052 * and returns, taking three arguments in addition to the standard ones.
12053 *
12054 * @param a_pfnCImpl The pointer to the C routine.
12055 * @param a0 The first extra argument.
12056 * @param a1 The second extra argument.
12057 * @param a2 The third extra argument.
12058 */
12059#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12060
12061/**
12062 * Defers the rest of the instruction emulation to a C implementation routine
12063 * and returns, taking four arguments in addition to the standard ones.
12064 *
12065 * @param a_pfnCImpl The pointer to the C routine.
12066 * @param a0 The first extra argument.
12067 * @param a1 The second extra argument.
12068 * @param a2 The third extra argument.
12069 * @param a3 The fourth extra argument.
12070 */
12071#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12072
12073/**
12074 * Defers the rest of the instruction emulation to a C implementation routine
12075 * and returns, taking five arguments in addition to the standard ones.
12076 *
12077 * @param a_pfnCImpl The pointer to the C routine.
12078 * @param a0 The first extra argument.
12079 * @param a1 The second extra argument.
12080 * @param a2 The third extra argument.
12081 * @param a3 The fourth extra argument.
12082 * @param a4 The fifth extra argument.
12083 */
12084#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12085
12086/**
12087 * Defers the entire instruction emulation to a C implementation routine and
12088 * returns, only taking the standard parameters.
12089 *
12090 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12091 *
12092 * @param a_pfnCImpl The pointer to the C routine.
12093 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12094 */
12095#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12096
12097/**
12098 * Defers the entire instruction emulation to a C implementation routine and
12099 * returns, taking one argument in addition to the standard ones.
12100 *
12101 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12102 *
12103 * @param a_pfnCImpl The pointer to the C routine.
12104 * @param a0 The argument.
12105 */
12106#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12107
12108/**
12109 * Defers the entire instruction emulation to a C implementation routine and
12110 * returns, taking two arguments in addition to the standard ones.
12111 *
12112 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12113 *
12114 * @param a_pfnCImpl The pointer to the C routine.
12115 * @param a0 The first extra argument.
12116 * @param a1 The second extra argument.
12117 */
12118#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12119
12120/**
12121 * Defers the entire instruction emulation to a C implementation routine and
12122 * returns, taking three arguments in addition to the standard ones.
12123 *
12124 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12125 *
12126 * @param a_pfnCImpl The pointer to the C routine.
12127 * @param a0 The first extra argument.
12128 * @param a1 The second extra argument.
12129 * @param a2 The third extra argument.
12130 */
12131#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12132
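/*
 * Illustrative sketch: an instruction whose semantics live entirely in a C
 * worker simply returns one of the deferral macros from its decoder function,
 * e.g. (iemCImpl_hlt is assumed here for illustration):
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 */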
12133/**
12134 * Calls a FPU assembly implementation taking one visible argument.
12135 *
12136 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12137 * @param a0 The first extra argument.
12138 */
12139#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12140 do { \
12141 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12142 } while (0)
12143
12144/**
12145 * Calls a FPU assembly implementation taking two visible arguments.
12146 *
12147 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12148 * @param a0 The first extra argument.
12149 * @param a1 The second extra argument.
12150 */
12151#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12152 do { \
12153 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12154 } while (0)
12155
12156/**
12157 * Calls a FPU assembly implementation taking three visible arguments.
12158 *
12159 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12160 * @param a0 The first extra argument.
12161 * @param a1 The second extra argument.
12162 * @param a2 The third extra argument.
12163 */
12164#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12165 do { \
12166 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12167 } while (0)
12168
12169#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12170 do { \
12171 (a_FpuData).FSW = (a_FSW); \
12172 (a_FpuData).r80Result = *(a_pr80Value); \
12173 } while (0)
12174
12175/** Pushes FPU result onto the stack. */
12176#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12177 iemFpuPushResult(pVCpu, &a_FpuData)
12178/** Pushes FPU result onto the stack and sets the FPUDP. */
12179#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12180 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12181
12182/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12183#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12184 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12185
12186/** Stores FPU result in a stack register. */
12187#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12188 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12189/** Stores FPU result in a stack register and pops the stack. */
12190#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12191 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12192/** Stores FPU result in a stack register and sets the FPUDP. */
12193#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12194 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12195/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12196 * stack. */
12197#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12198 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12199
12200/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12201#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12202 iemFpuUpdateOpcodeAndIp(pVCpu)
12203/** Free a stack register (for FFREE and FFREEP). */
12204#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12205 iemFpuStackFree(pVCpu, a_iStReg)
12206/** Increment the FPU stack pointer. */
12207#define IEM_MC_FPU_STACK_INC_TOP() \
12208 iemFpuStackIncTop(pVCpu)
12209/** Decrement the FPU stack pointer. */
12210#define IEM_MC_FPU_STACK_DEC_TOP() \
12211 iemFpuStackDecTop(pVCpu)
12212
12213/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12214#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12215 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12216/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12217#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12218 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12219/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12220#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12221 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12222/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12223#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12224 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12225/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12226 * stack. */
12227#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12228 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12229/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12230#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12231 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12232
12233/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12234#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12235 iemFpuStackUnderflow(pVCpu, a_iStDst)
12236/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12237 * stack. */
12238#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12239 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12240/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12241 * FPUDS. */
12242#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12243 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12244/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12245 * FPUDS. Pops stack. */
12246#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12247 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12248/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12249 * stack twice. */
12250#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12251 iemFpuStackUnderflowThenPopPop(pVCpu)
12252/** Raises a FPU stack underflow exception for an instruction pushing a result
12253 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12254#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12255 iemFpuStackPushUnderflow(pVCpu)
12256/** Raises a FPU stack underflow exception for an instruction pushing a result
12257 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12258#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12259 iemFpuStackPushUnderflowTwo(pVCpu)
12260
12261/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12262 * FPUIP, FPUCS and FOP. */
12263#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12264 iemFpuStackPushOverflow(pVCpu)
12265/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12266 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12267#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12268 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12269/** Prepares for using the FPU state.
12270 * Ensures that we can use the host FPU in the current context (RC+R0).
12271 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12272#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12273/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12274#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12275/** Actualizes the guest FPU state so it can be accessed and modified. */
12276#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12277
12278/** Prepares for using the SSE state.
12279 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12280 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12281#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12282/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12283#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12284/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12285#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12286
12287/** Prepares for using the AVX state.
12288 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12289 * Ensures the guest AVX state in the CPUMCTX is up to date.
12290 * @note This will include the AVX512 state too when support for it is added
12291 * due to the zero extending feature of VEX instructions. */
12292#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12293/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12294#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12295/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12296#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12297
12298/**
12299 * Calls a MMX assembly implementation taking two visible arguments.
12300 *
12301 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12302 * @param a0 The first extra argument.
12303 * @param a1 The second extra argument.
12304 */
12305#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12306 do { \
12307 IEM_MC_PREPARE_FPU_USAGE(); \
12308 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12309 } while (0)
12310
12311/**
12312 * Calls a MMX assembly implementation taking three visible arguments.
12313 *
12314 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12315 * @param a0 The first extra argument.
12316 * @param a1 The second extra argument.
12317 * @param a2 The third extra argument.
12318 */
12319#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12320 do { \
12321 IEM_MC_PREPARE_FPU_USAGE(); \
12322 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12323 } while (0)
12324
12325
12326/**
12327 * Calls a SSE assembly implementation taking two visible arguments.
12328 *
12329 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12330 * @param a0 The first extra argument.
12331 * @param a1 The second extra argument.
12332 */
12333#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12334 do { \
12335 IEM_MC_PREPARE_SSE_USAGE(); \
12336 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12337 } while (0)
12338
12339/**
12340 * Calls a SSE assembly implementation taking three visible arguments.
12341 *
12342 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12343 * @param a0 The first extra argument.
12344 * @param a1 The second extra argument.
12345 * @param a2 The third extra argument.
12346 */
12347#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12348 do { \
12349 IEM_MC_PREPARE_SSE_USAGE(); \
12350 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12351 } while (0)
12352
12353
12354/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12355 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12356#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12357 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12358
12359/**
12360 * Calls a AVX assembly implementation taking two visible arguments.
12361 *
12362 * There is one implicit zero'th argument, a pointer to the extended state.
12363 *
12364 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12365 * @param a1 The first extra argument.
12366 * @param a2 The second extra argument.
12367 */
12368#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12369 do { \
12370 IEM_MC_PREPARE_AVX_USAGE(); \
12371 a_pfnAImpl(pXState, (a1), (a2)); \
12372 } while (0)
12373
12374/**
12375 * Calls a AVX assembly implementation taking three visible arguments.
12376 *
12377 * There is one implicit zero'th argument, a pointer to the extended state.
12378 *
12379 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12380 * @param a1 The first extra argument.
12381 * @param a2 The second extra argument.
12382 * @param a3 The third extra argument.
12383 */
12384#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12385 do { \
12386 IEM_MC_PREPARE_AVX_USAGE(); \
12387 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12388 } while (0)
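/*
 * Illustrative sketch: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares the implicit
 * zero'th pXState argument that IEM_MC_CALL_AVX_AIMPL_2/3 pass to the worker,
 * so a two-register VEX worker is wired up roughly like this (IEM_MC_BEGIN,
 * IEM_MC_ARG and the iemAImpl_vxxx_u256 worker name are assumptions from
 * elsewhere in the IEM sources):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *      IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vxxx_u256, iYRegDst, iYRegSrc);
 *      IEM_MC_END();
 */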
12389
12390/** @note Not for IOPL or IF testing. */
12391#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12392/** @note Not for IOPL or IF testing. */
12393#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12394/** @note Not for IOPL or IF testing. */
12395#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12396/** @note Not for IOPL or IF testing. */
12397#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12398/** @note Not for IOPL or IF testing. */
12399#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12400 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12401 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12402/** @note Not for IOPL or IF testing. */
12403#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12404 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12405 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12406/** @note Not for IOPL or IF testing. */
12407#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12408 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12409 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12410 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12413 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12414 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12415 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12416#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12417#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12418#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12421 if ( pVCpu->cpum.GstCtx.cx != 0 \
12422 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12425 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12426 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12429 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12430 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12433 if ( pVCpu->cpum.GstCtx.cx != 0 \
12434 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12437 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12438 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12439/** @note Not for IOPL or IF testing. */
12440#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12441 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12442 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12443#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12444#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12445
12446#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12447 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12448#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12449 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12450#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12451 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12452#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12453 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12454#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12455 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12456#define IEM_MC_IF_FCW_IM() \
12457 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12458
12459#define IEM_MC_ELSE() } else {
12460#define IEM_MC_ENDIF() } do {} while (0)
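/*
 * Illustrative sketch: a typical two-operand FPU arithmetic instruction ties
 * the conditional, call and result macros together roughly like this
 * (IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_ARG, IEM_MC_ARG_LOCAL_REF, the
 * IEMFPURESULT types and the iemAImpl_fadd_r80_by_r80 worker are assumptions
 * from elsewhere in the IEM sources; iStReg is the second operand's stack
 * register number taken from the ModR/M byte):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */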
12461
12462/** @} */
12463
12464
12465/** @name Opcode Debug Helpers.
12466 * @{
12467 */
12468#ifdef VBOX_WITH_STATISTICS
12469# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12470#else
12471# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12472#endif
12473
12474#ifdef DEBUG
12475# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12476 do { \
12477 IEMOP_INC_STATS(a_Stats); \
12478 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12479 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12480 } while (0)
12481
12482# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12483 do { \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12485 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12486 (void)RT_CONCAT(OP_,a_Upper); \
12487 (void)(a_fDisHints); \
12488 (void)(a_fIemHints); \
12489 } while (0)
12490
12491# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12492 do { \
12493 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12494 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12495 (void)RT_CONCAT(OP_,a_Upper); \
12496 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12497 (void)(a_fDisHints); \
12498 (void)(a_fIemHints); \
12499 } while (0)
12500
12501# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12502 do { \
12503 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12504 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12505 (void)RT_CONCAT(OP_,a_Upper); \
12506 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12507 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12508 (void)(a_fDisHints); \
12509 (void)(a_fIemHints); \
12510 } while (0)
12511
12512# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12513 do { \
12514 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12515 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12516 (void)RT_CONCAT(OP_,a_Upper); \
12517 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12518 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12519 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12520 (void)(a_fDisHints); \
12521 (void)(a_fIemHints); \
12522 } while (0)
12523
12524# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12525 do { \
12526 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12527 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12528 (void)RT_CONCAT(OP_,a_Upper); \
12529 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12530 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12532 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12533 (void)(a_fDisHints); \
12534 (void)(a_fIemHints); \
12535 } while (0)
12536
12537#else
12538# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12539
12540# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12541 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12542# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12543 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12544# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12545 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12546# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12547 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12548# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12549 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12550
12551#endif
12552
12553#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC0EX(a_Lower, \
12555 #a_Lower, \
12556 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12557#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12558 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12559 #a_Lower " " #a_Op1, \
12560 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12561#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12563 #a_Lower " " #a_Op1 "," #a_Op2, \
12564 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12565#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12567 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12568 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12569#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12571 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12572 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
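/*
 * Illustrative example: a decoder function typically opens with one of these,
 * e.g. (the RM form token, the MOVZX/Gv/Eb parameter tokens and the
 * DISOPTYPE_HARMLESS hint are assumptions defined elsewhere):
 *
 *      IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 *
 * which bumps the movzx_Gv_Eb statistics counter and, in debug builds, logs
 * the decoded mnemonic at level 4.
 */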
12573
12574/** @} */
12575
12576
12577/** @name Opcode Helpers.
12578 * @{
12579 */
12580
12581#ifdef IN_RING3
12582# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12583 do { \
12584 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12585 else \
12586 { \
12587 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12588 return IEMOP_RAISE_INVALID_OPCODE(); \
12589 } \
12590 } while (0)
12591#else
12592# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12593 do { \
12594 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12595 else return IEMOP_RAISE_INVALID_OPCODE(); \
12596 } while (0)
12597#endif
12598
12599/** The instruction requires a 186 or later. */
12600#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12601# define IEMOP_HLP_MIN_186() do { } while (0)
12602#else
12603# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12604#endif
12605
12606/** The instruction requires a 286 or later. */
12607#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12608# define IEMOP_HLP_MIN_286() do { } while (0)
12609#else
12610# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12611#endif
12612
12613/** The instruction requires a 386 or later. */
12614#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12615# define IEMOP_HLP_MIN_386() do { } while (0)
12616#else
12617# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12618#endif
12619
12620/** The instruction requires a 386 or later if the given expression is true. */
12621#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12622# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12623#else
12624# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12625#endif
12626
12627/** The instruction requires a 486 or later. */
12628#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12629# define IEMOP_HLP_MIN_486() do { } while (0)
12630#else
12631# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12632#endif
12633
12634/** The instruction requires a Pentium (586) or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12636# define IEMOP_HLP_MIN_586() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12639#endif
12640
12641/** The instruction requires a PentiumPro (686) or later. */
12642#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12643# define IEMOP_HLP_MIN_686() do { } while (0)
12644#else
12645# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12646#endif
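/*
 * Illustrative sketch: a decoder for an instruction introduced with the 386
 * drops the matching helper at the top (FNIEMOP_DEF is assumed here and
 * iemOp_xyzzy is a hypothetical decoder):
 *
 *      FNIEMOP_DEF(iemOp_xyzzy)
 *      {
 *          IEMOP_HLP_MIN_386();
 *          // ... rest of the decoder ...
 *      }
 *
 * On older configured target CPUs the helper raises an invalid opcode
 * exception (with a debugger stop in ring-3 builds); otherwise it compiles
 * away to nothing.
 */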
12647
12648
12649/** The instruction raises an \#UD in real and V8086 mode. */
12650#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12651 do \
12652 { \
12653 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12654 else return IEMOP_RAISE_INVALID_OPCODE(); \
12655 } while (0)
12656
12657#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12658/** This instruction raises an \#UD in real and V8086 mode, or when in long
12659 * mode without a 64-bit code segment (applicable to all VMX instructions
12660 * except VMCALL).
12661 */
12662#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12663 do \
12664 { \
12665 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12666 && ( !IEM_IS_LONG_MODE(pVCpu) \
12667 || IEM_IS_64BIT_CODE(pVCpu))) \
12668 { /* likely */ } \
12669 else \
12670 { \
12671 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12672 { \
12673 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12674 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12675 return IEMOP_RAISE_INVALID_OPCODE(); \
12676 } \
12677 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12678 { \
12679 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12680 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12681 return IEMOP_RAISE_INVALID_OPCODE(); \
12682 } \
12683 } \
12684 } while (0)
12685
12686/** The instruction can only be executed in VMX operation (VMX root mode and
12687 * non-root mode).
12688 *
12689 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12690 */
12691# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12692 do \
12693 { \
12694 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12695 else \
12696 { \
12697 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12698 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12699 return IEMOP_RAISE_INVALID_OPCODE(); \
12700 } \
12701 } while (0)
12702#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12703
12704/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12705 * 64-bit mode. */
12706#define IEMOP_HLP_NO_64BIT() \
12707 do \
12708 { \
12709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12710 return IEMOP_RAISE_INVALID_OPCODE(); \
12711 } while (0)
12712
12713/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12714 * 64-bit mode. */
12715#define IEMOP_HLP_ONLY_64BIT() \
12716 do \
12717 { \
12718 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12719 return IEMOP_RAISE_INVALID_OPCODE(); \
12720 } while (0)
12721
12722/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12723#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12724 do \
12725 { \
12726 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12727 iemRecalEffOpSize64Default(pVCpu); \
12728 } while (0)
12729
12730/** The instruction has 64-bit operand size if 64-bit mode. */
12731#define IEMOP_HLP_64BIT_OP_SIZE() \
12732 do \
12733 { \
12734 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12735 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12736 } while (0)
12737
12738/** Only a REX prefix immediately preceding the first opcode byte takes
12739 * effect. This macro helps ensure this as well as log bad guest code. */
12740#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12741 do \
12742 { \
12743 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12744 { \
12745 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12746 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12747 pVCpu->iem.s.uRexB = 0; \
12748 pVCpu->iem.s.uRexIndex = 0; \
12749 pVCpu->iem.s.uRexReg = 0; \
12750 iemRecalEffOpSize(pVCpu); \
12751 } \
12752 } while (0)
12753
12754/**
12755 * Done decoding.
12756 */
12757#define IEMOP_HLP_DONE_DECODING() \
12758 do \
12759 { \
12760 /*nothing for now, maybe later... */ \
12761 } while (0)
12762
12763/**
12764 * Done decoding, raise \#UD exception if lock prefix present.
12765 */
12766#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12767 do \
12768 { \
12769 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12770 { /* likely */ } \
12771 else \
12772 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12773 } while (0)
12774
12775
12776/**
12777 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12778 * repnz or size prefixes are present, or if in real or v8086 mode.
12779 */
12780#define IEMOP_HLP_DONE_VEX_DECODING() \
12781 do \
12782 { \
12783 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12784 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12785 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12786 { /* likely */ } \
12787 else \
12788 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12789 } while (0)
12790
12791/**
12792 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12793 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12794 */
12795#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12796 do \
12797 { \
12798 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12799 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12800 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12801 && pVCpu->iem.s.uVexLength == 0)) \
12802 { /* likely */ } \
12803 else \
12804 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12805 } while (0)
12806
12807
12808/**
12809 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12810 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12811 * register 0, or if in real or v8086 mode.
12812 */
12813#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12814 do \
12815 { \
12816 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12817 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12818 && !pVCpu->iem.s.uVex3rdReg \
12819 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12820 { /* likely */ } \
12821 else \
12822 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12823 } while (0)
12824
12825/**
12826 * Done decoding VEX, no V, L=0.
12827 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12828 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12829 */
12830#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12831 do \
12832 { \
12833 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12834 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12835 && pVCpu->iem.s.uVexLength == 0 \
12836 && pVCpu->iem.s.uVex3rdReg == 0 \
12837 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12838 { /* likely */ } \
12839 else \
12840 return IEMOP_RAISE_INVALID_OPCODE(); \
12841 } while (0)
12842
12843#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12844 do \
12845 { \
12846 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12847 { /* likely */ } \
12848 else \
12849 { \
12850 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12851 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12852 } \
12853 } while (0)
12854#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12855 do \
12856 { \
12857 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12858 { /* likely */ } \
12859 else \
12860 { \
12861 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12862 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12863 } \
12864 } while (0)
12865
12866/**
12867 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12868 * are present.
12869 */
12870#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12871 do \
12872 { \
12873 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12874 { /* likely */ } \
12875 else \
12876 return IEMOP_RAISE_INVALID_OPCODE(); \
12877 } while (0)
12878
12879/**
12880 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12881 * prefixes are present.
12882 */
12883#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12884 do \
12885 { \
12886 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12887 { /* likely */ } \
12888 else \
12889 return IEMOP_RAISE_INVALID_OPCODE(); \
12890 } while (0)
12891
12892
12893/**
12894 * Calculates the effective address of a ModR/M memory operand.
12895 *
12896 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12897 *
12898 * @return Strict VBox status code.
12899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12900 * @param bRm The ModRM byte.
12901 * @param cbImm The size of any immediate following the
12902 * effective address opcode bytes. Important for
12903 * RIP relative addressing.
12904 * @param pGCPtrEff Where to return the effective address.
12905 */
12906IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12907{
12908 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12909# define SET_SS_DEF() \
12910 do \
12911 { \
12912 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12913 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12914 } while (0)
12915
12916 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12917 {
12918/** @todo Check the effective address size crap! */
12919 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12920 {
12921 uint16_t u16EffAddr;
12922
12923 /* Handle the disp16 form with no registers first. */
12924 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12925 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12926 else
12927 {
12928                 /* Get the displacement. */
12929 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12930 {
12931 case 0: u16EffAddr = 0; break;
12932 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12933 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12934 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12935 }
12936
12937 /* Add the base and index registers to the disp. */
12938 switch (bRm & X86_MODRM_RM_MASK)
12939 {
12940 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12941 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12942 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12943 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12944 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12945 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12946 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12947 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12948 }
12949 }
12950
12951 *pGCPtrEff = u16EffAddr;
12952 }
12953 else
12954 {
12955 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12956 uint32_t u32EffAddr;
12957
12958 /* Handle the disp32 form with no registers first. */
12959 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12960 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12961 else
12962 {
12963 /* Get the register (or SIB) value. */
12964 switch ((bRm & X86_MODRM_RM_MASK))
12965 {
12966 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12967 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12968 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12969 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12970 case 4: /* SIB */
12971 {
12972 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12973
12974 /* Get the index and scale it. */
12975 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12976 {
12977 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12978 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12979 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12980 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12981 case 4: u32EffAddr = 0; /*none */ break;
12982 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12983 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12984 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12985 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12986 }
12987 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12988
12989 /* add base */
12990 switch (bSib & X86_SIB_BASE_MASK)
12991 {
12992 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12993 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12994 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12995 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12996 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12997 case 5:
12998 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12999 {
13000 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13001 SET_SS_DEF();
13002 }
13003 else
13004 {
13005 uint32_t u32Disp;
13006 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13007 u32EffAddr += u32Disp;
13008 }
13009 break;
13010 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13011 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13012 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13013 }
13014 break;
13015 }
13016 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13017 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13018 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13020 }
13021
13022 /* Get and add the displacement. */
13023 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13024 {
13025 case 0:
13026 break;
13027 case 1:
13028 {
13029 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13030 u32EffAddr += i8Disp;
13031 break;
13032 }
13033 case 2:
13034 {
13035 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13036 u32EffAddr += u32Disp;
13037 break;
13038 }
13039 default:
13040 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13041 }
13042
13043 }
13044 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13045 *pGCPtrEff = u32EffAddr;
13046 else
13047 {
13048 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13049 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13050 }
13051 }
13052 }
13053 else
13054 {
13055 uint64_t u64EffAddr;
13056
13057 /* Handle the rip+disp32 form with no registers first. */
13058 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13059 {
13060 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13061 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13062 }
13063 else
13064 {
13065 /* Get the register (or SIB) value. */
13066 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13067 {
13068 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13069 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13070 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13071 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13072 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13073 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13074 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13075 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13076 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13077 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13078 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13079 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13080 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13081 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13082 /* SIB */
13083 case 4:
13084 case 12:
13085 {
13086 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13087
13088 /* Get the index and scale it. */
13089 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13090 {
13091 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13092 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13093 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13094 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13095 case 4: u64EffAddr = 0; /*none */ break;
13096 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13097 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13098 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13099 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13100 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13101 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13102 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13103 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13104 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13105 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13106 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13107 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13108 }
13109 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13110
13111 /* add base */
13112 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13113 {
13114 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13115 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13116 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13117 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13118 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13119 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13120 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13121 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13122 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13123 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13124 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13125 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13126 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13127 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13128 /* complicated encodings */
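 /* (base=101b: with mod=00 there is no base register and a disp32 follows;
    with mod=01/10 the base is rBP, or r13 when REX.B is set.) */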
13129 case 5:
13130 case 13:
13131 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13132 {
13133 if (!pVCpu->iem.s.uRexB)
13134 {
13135 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13136 SET_SS_DEF();
13137 }
13138 else
13139 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13140 }
13141 else
13142 {
13143 uint32_t u32Disp;
13144 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13145 u64EffAddr += (int32_t)u32Disp;
13146 }
13147 break;
13148 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13149 }
13150 break;
13151 }
13152 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13153 }
13154
13155 /* Get and add the displacement. */
13156 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13157 {
13158 case 0:
13159 break;
13160 case 1:
13161 {
13162 int8_t i8Disp;
13163 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13164 u64EffAddr += i8Disp;
13165 break;
13166 }
13167 case 2:
13168 {
13169 uint32_t u32Disp;
13170 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13171 u64EffAddr += (int32_t)u32Disp;
13172 break;
13173 }
13174 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13175 }
13176
13177 }
13178
13179 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13180 *pGCPtrEff = u64EffAddr;
13181 else
13182 {
13183 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13184 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13185 }
13186 }
13187
13188 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13189 return VINF_SUCCESS;
13190}
13191
13192
13193/**
13194 * Calculates the effective address of a ModR/M memory operand.
13195 *
13196 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13197 *
13198 * @return Strict VBox status code.
13199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13200 * @param bRm The ModRM byte.
13201 * @param cbImm The size of any immediate following the
13202 * effective address opcode bytes. Important for
13203 * RIP relative addressing.
13204 * @param pGCPtrEff Where to return the effective address.
13205 * @param offRsp RSP displacement.
13206 * @param offRsp RSP displacement.
 */
13207IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13208{
13209 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13210# define SET_SS_DEF() \
13211 do \
13212 { \
13213 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13214 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13215 } while (0)
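 /* (SET_SS_DEF: when no segment-override prefix is present, rBP/rSP based
    addressing defaults to the SS segment instead of DS.) */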
13216
13217 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13218 {
13219/** @todo Check the effective address size crap! */
13220 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13221 {
13222 uint16_t u16EffAddr;
13223
13224 /* Handle the disp16 form with no registers first. */
13225 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13226 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13227 else
13228 {
13229 /* Get the displacement. */
13230 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13231 {
13232 case 0: u16EffAddr = 0; break;
13233 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13234 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13235 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13236 }
13237
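 /* (16-bit addressing: r/m selects one of the classic base/index combinations
    BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP and BX.) */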
13238 /* Add the base and index registers to the disp. */
13239 switch (bRm & X86_MODRM_RM_MASK)
13240 {
13241 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13242 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13243 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13244 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13245 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13246 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13247 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13248 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13249 }
13250 }
13251
13252 *pGCPtrEff = u16EffAddr;
13253 }
13254 else
13255 {
13256 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13257 uint32_t u32EffAddr;
13258
13259 /* Handle the disp32 form with no registers first. */
13260 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13261 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13262 else
13263 {
13264 /* Get the register (or SIB) value. */
13265 switch ((bRm & X86_MODRM_RM_MASK))
13266 {
13267 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13268 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13269 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13270 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13271 case 4: /* SIB */
13272 {
13273 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13274
13275 /* Get the index and scale it. */
13276 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13277 {
13278 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13279 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13280 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13281 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13282 case 4: u32EffAddr = 0; /*none */ break;
13283 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13284 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13285 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13286 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13287 }
13288 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13289
13290 /* add base */
13291 switch (bSib & X86_SIB_BASE_MASK)
13292 {
13293 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13294 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13295 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13296 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
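 /* (Unlike the plain helper, this variant folds the caller-supplied offRsp
    into ESP/RSP based addressing below, presumably to compensate for a stack
    pointer adjustment the caller has already applied or is about to apply.) */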
13297 case 4:
13298 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13299 SET_SS_DEF();
13300 break;
13301 case 5:
13302 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13303 {
13304 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13305 SET_SS_DEF();
13306 }
13307 else
13308 {
13309 uint32_t u32Disp;
13310 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13311 u32EffAddr += u32Disp;
13312 }
13313 break;
13314 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13315 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13316 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13317 }
13318 break;
13319 }
13320 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13321 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13322 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13323 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13324 }
13325
13326 /* Get and add the displacement. */
13327 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13328 {
13329 case 0:
13330 break;
13331 case 1:
13332 {
13333 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13334 u32EffAddr += i8Disp;
13335 break;
13336 }
13337 case 2:
13338 {
13339 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13340 u32EffAddr += u32Disp;
13341 break;
13342 }
13343 default:
13344 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13345 }
13346
13347 }
13348 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13349 *pGCPtrEff = u32EffAddr;
13350 else
13351 {
13352 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13353 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13354 }
13355 }
13356 }
13357 else
13358 {
13359 uint64_t u64EffAddr;
13360
13361 /* Handle the rip+disp32 form with no registers first. */
13362 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13363 {
13364 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13365 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13366 }
13367 else
13368 {
13369 /* Get the register (or SIB) value. */
13370 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13371 {
13372 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13373 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13374 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13375 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13376 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13377 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13378 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13379 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13380 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13381 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13382 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13383 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13384 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13385 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13386 /* SIB */
13387 case 4:
13388 case 12:
13389 {
13390 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13391
13392 /* Get the index and scale it. */
13393 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13394 {
13395 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13396 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13397 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13398 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13399 case 4: u64EffAddr = 0; /*none */ break;
13400 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13401 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13402 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13403 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13404 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13405 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13406 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13407 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13408 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13409 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13410 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13412 }
13413 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13414
13415 /* add base */
13416 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13417 {
13418 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13419 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13420 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13421 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13422 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13423 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13424 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13425 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13426 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13427 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13428 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13429 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13430 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13431 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13432 /* complicated encodings */
13433 case 5:
13434 case 13:
13435 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13436 {
13437 if (!pVCpu->iem.s.uRexB)
13438 {
13439 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13440 SET_SS_DEF();
13441 }
13442 else
13443 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13444 }
13445 else
13446 {
13447 uint32_t u32Disp;
13448 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13449 u64EffAddr += (int32_t)u32Disp;
13450 }
13451 break;
13452 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13453 }
13454 break;
13455 }
13456 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13457 }
13458
13459 /* Get and add the displacement. */
13460 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13461 {
13462 case 0:
13463 break;
13464 case 1:
13465 {
13466 int8_t i8Disp;
13467 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13468 u64EffAddr += i8Disp;
13469 break;
13470 }
13471 case 2:
13472 {
13473 uint32_t u32Disp;
13474 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13475 u64EffAddr += (int32_t)u32Disp;
13476 break;
13477 }
13478 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13479 }
13480
13481 }
13482
13483 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13484 *pGCPtrEff = u64EffAddr;
13485 else
13486 {
13487 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13488 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13489 }
13490 }
13491
13492 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13493 return VINF_SUCCESS;
13494}
13495
13496
13497#ifdef IEM_WITH_SETJMP
13498/**
13499 * Calculates the effective address of a ModR/M memory operand.
13500 *
13501 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13502 *
13503 * May longjmp on internal error.
13504 *
13505 * @return The effective address.
13506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13507 * @param bRm The ModRM byte.
13508 * @param cbImm The size of any immediate following the
13509 * effective address opcode bytes. Important for
13510 * RIP relative addressing.
13511 */
13512IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13513{
13514 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13515# define SET_SS_DEF() \
13516 do \
13517 { \
13518 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13519 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13520 } while (0)
13521
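 /* (This setjmp variant raises internal decoding errors via longjmp on the
    active jump buffer instead of returning a strict status code; see the
    AssertFailedStmt and IEM_NOT_REACHED_DEFAULT_CASE_RET2 uses below.) */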
13522 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13523 {
13524/** @todo Check the effective address size crap! */
13525 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13526 {
13527 uint16_t u16EffAddr;
13528
13529 /* Handle the disp16 form with no registers first. */
13530 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13531 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13532 else
13533 {
13534 /* Get the displacement. */
13535 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13536 {
13537 case 0: u16EffAddr = 0; break;
13538 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13539 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13540 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13541 }
13542
13543 /* Add the base and index registers to the disp. */
13544 switch (bRm & X86_MODRM_RM_MASK)
13545 {
13546 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13547 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13548 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13549 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13550 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13551 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13552 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13553 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13554 }
13555 }
13556
13557 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13558 return u16EffAddr;
13559 }
13560
13561 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13562 uint32_t u32EffAddr;
13563
13564 /* Handle the disp32 form with no registers first. */
13565 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13566 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13567 else
13568 {
13569 /* Get the register (or SIB) value. */
13570 switch ((bRm & X86_MODRM_RM_MASK))
13571 {
13572 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13573 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13574 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13575 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13576 case 4: /* SIB */
13577 {
13578 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13579
13580 /* Get the index and scale it. */
13581 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13582 {
13583 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13584 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13585 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13586 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13587 case 4: u32EffAddr = 0; /*none */ break;
13588 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13589 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13590 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13591 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13592 }
13593 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13594
13595 /* add base */
13596 switch (bSib & X86_SIB_BASE_MASK)
13597 {
13598 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13599 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13600 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13601 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13602 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13603 case 5:
13604 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13605 {
13606 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13607 SET_SS_DEF();
13608 }
13609 else
13610 {
13611 uint32_t u32Disp;
13612 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13613 u32EffAddr += u32Disp;
13614 }
13615 break;
13616 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13617 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13618 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13619 }
13620 break;
13621 }
13622 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13623 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13624 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13625 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13626 }
13627
13628 /* Get and add the displacement. */
13629 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13630 {
13631 case 0:
13632 break;
13633 case 1:
13634 {
13635 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13636 u32EffAddr += i8Disp;
13637 break;
13638 }
13639 case 2:
13640 {
13641 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13642 u32EffAddr += u32Disp;
13643 break;
13644 }
13645 default:
13646 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13647 }
13648 }
13649
13650 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13651 {
13652 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13653 return u32EffAddr;
13654 }
13655 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13656 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13657 return u32EffAddr & UINT16_MAX;
13658 }
13659
13660 uint64_t u64EffAddr;
13661
13662 /* Handle the rip+disp32 form with no registers first. */
13663 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13664 {
13665 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13666 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13667 }
13668 else
13669 {
13670 /* Get the register (or SIB) value. */
13671 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13672 {
13673 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13674 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13675 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13676 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13677 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13678 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13679 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13680 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13681 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13682 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13683 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13684 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13685 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13686 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13687 /* SIB */
13688 case 4:
13689 case 12:
13690 {
13691 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13692
13693 /* Get the index and scale it. */
13694 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13695 {
13696 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13697 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13698 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13699 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13700 case 4: u64EffAddr = 0; /*none */ break;
13701 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13702 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13703 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13704 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13705 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13706 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13707 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13708 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13709 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13710 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13711 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13712 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13713 }
13714 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13715
13716 /* add base */
13717 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13718 {
13719 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13720 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13721 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13722 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13723 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13724 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13725 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13726 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13727 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13728 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13729 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13730 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13731 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13732 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13733 /* complicated encodings */
13734 case 5:
13735 case 13:
13736 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13737 {
13738 if (!pVCpu->iem.s.uRexB)
13739 {
13740 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13741 SET_SS_DEF();
13742 }
13743 else
13744 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13745 }
13746 else
13747 {
13748 uint32_t u32Disp;
13749 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13750 u64EffAddr += (int32_t)u32Disp;
13751 }
13752 break;
13753 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13754 }
13755 break;
13756 }
13757 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13758 }
13759
13760 /* Get and add the displacement. */
13761 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13762 {
13763 case 0:
13764 break;
13765 case 1:
13766 {
13767 int8_t i8Disp;
13768 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13769 u64EffAddr += i8Disp;
13770 break;
13771 }
13772 case 2:
13773 {
13774 uint32_t u32Disp;
13775 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13776 u64EffAddr += (int32_t)u32Disp;
13777 break;
13778 }
13779 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13780 }
13781
13782 }
13783
13784 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13785 {
13786 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13787 return u64EffAddr;
13788 }
13789 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13790 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13791 return u64EffAddr & UINT32_MAX;
13792}
13793#endif /* IEM_WITH_SETJMP */
13794
13795/** @} */
13796
13797
13798
13799/*
13800 * Include the instructions
13801 */
13802#include "IEMAllInstructions.cpp.h"
13803
13804
13805
13806#ifdef LOG_ENABLED
13807/**
13808 * Logs the current instruction.
13809 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13810 * @param fSameCtx Set if we have the same context information as the VMM,
13811 * clear if we may have already executed an instruction in
13812 * our debug context. When clear, we assume IEMCPU holds
13813 * valid CPU mode info.
13814 *
13815 * The @a fSameCtx parameter is now misleading and obsolete.
13816 * @param pszFunction The IEM function doing the execution.
13817 */
13818IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13819{
13820# ifdef IN_RING3
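 /* (Full disassembly and the register dump below rely on ring-3 only DBGFR3
    APIs; other contexts fall back to the brief LogFlow summary further down.) */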
13821 if (LogIs2Enabled())
13822 {
13823 char szInstr[256];
13824 uint32_t cbInstr = 0;
13825 if (fSameCtx)
13826 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13827 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13828 szInstr, sizeof(szInstr), &cbInstr);
13829 else
13830 {
13831 uint32_t fFlags = 0;
13832 switch (pVCpu->iem.s.enmCpuMode)
13833 {
13834 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13835 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13836 case IEMMODE_16BIT:
13837 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13838 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13839 else
13840 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13841 break;
13842 }
13843 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13844 szInstr, sizeof(szInstr), &cbInstr);
13845 }
13846
13847 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13848 Log2(("**** %s\n"
13849 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13850 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13851 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13852 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13853 " %s\n"
13854 , pszFunction,
13855 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13856 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13858 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13859 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13860 szInstr));
13861
13862 if (LogIs3Enabled())
13863 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13864 }
13865 else
13866# endif
13867 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13868 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13869 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13870}
13871#endif /* LOG_ENABLED */
13872
13873
13874/**
13875 * Makes status code adjustments (pass up from I/O and access handler)
13876 * and maintains statistics.
13877 *
13878 * @returns Strict VBox status code to pass up.
13879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13880 * @param rcStrict The status from executing an instruction.
13881 */
13882DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13883{
13884 if (rcStrict != VINF_SUCCESS)
13885 {
13886 if (RT_SUCCESS(rcStrict))
13887 {
13888 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13889 || rcStrict == VINF_IOM_R3_IOPORT_READ
13890 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13891 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13892 || rcStrict == VINF_IOM_R3_MMIO_READ
13893 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13894 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13895 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13896 || rcStrict == VINF_CPUM_R3_MSR_READ
13897 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13898 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13899 || rcStrict == VINF_EM_RAW_TO_R3
13900 || rcStrict == VINF_EM_TRIPLE_FAULT
13901 || rcStrict == VINF_GIM_R3_HYPERCALL
13902 /* raw-mode / virt handlers only: */
13903 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13904 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13905 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13906 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13907 || rcStrict == VINF_SELM_SYNC_GDT
13908 || rcStrict == VINF_CSAM_PENDING_ACTION
13909 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13910 /* nested hw.virt codes: */
13911 || rcStrict == VINF_VMX_VMEXIT
13912 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13913 || rcStrict == VINF_SVM_VMEXIT
13914 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13915/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
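 /* (Merge any pending pass-up status with rcStrict (nested hw.virt VM-exit
    statuses aside): the pass-up code wins when it is not an EM-range status or
    when it ranks above - i.e. is numerically below - rcStrict; otherwise
    rcStrict is kept and the case is counted.) */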
13916 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13917#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13918 if ( rcStrict == VINF_VMX_VMEXIT
13919 && rcPassUp == VINF_SUCCESS)
13920 rcStrict = VINF_SUCCESS;
13921 else
13922#endif
13923#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13924 if ( rcStrict == VINF_SVM_VMEXIT
13925 && rcPassUp == VINF_SUCCESS)
13926 rcStrict = VINF_SUCCESS;
13927 else
13928#endif
13929 if (rcPassUp == VINF_SUCCESS)
13930 pVCpu->iem.s.cRetInfStatuses++;
13931 else if ( rcPassUp < VINF_EM_FIRST
13932 || rcPassUp > VINF_EM_LAST
13933 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13934 {
13935 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13936 pVCpu->iem.s.cRetPassUpStatus++;
13937 rcStrict = rcPassUp;
13938 }
13939 else
13940 {
13941 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13942 pVCpu->iem.s.cRetInfStatuses++;
13943 }
13944 }
13945 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13946 pVCpu->iem.s.cRetAspectNotImplemented++;
13947 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13948 pVCpu->iem.s.cRetInstrNotImplemented++;
13949 else
13950 pVCpu->iem.s.cRetErrStatuses++;
13951 }
13952 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13953 {
13954 pVCpu->iem.s.cRetPassUpStatus++;
13955 rcStrict = pVCpu->iem.s.rcPassUp;
13956 }
13957
13958 return rcStrict;
13959}
13960
13961
13962/**
13963 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13964 * IEMExecOneWithPrefetchedByPC.
13965 *
13966 * Similar code is found in IEMExecLots.
13967 *
13968 * @return Strict VBox status code.
13969 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13970 * @param fExecuteInhibit If set, execute the instruction following CLI,
13971 * POP SS and MOV SS,GR.
13972 * @param pszFunction The calling function name.
13973 */
13974DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13975{
13976 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13977 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13978 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13979 RT_NOREF_PV(pszFunction);
13980
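 /* (With IEM_WITH_SETJMP the opcode fetchers report failures via longjmp, so a
    jump buffer is installed around the dispatch and long jumps are counted;
    without it the dispatch simply returns a strict status code.) */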
13981#ifdef IEM_WITH_SETJMP
13982 VBOXSTRICTRC rcStrict;
13983 jmp_buf JmpBuf;
13984 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13985 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13986 if ((rcStrict = setjmp(JmpBuf)) == 0)
13987 {
13988 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13989 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13990 }
13991 else
13992 pVCpu->iem.s.cLongJumps++;
13993 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13994#else
13995 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13996 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13997#endif
13998 if (rcStrict == VINF_SUCCESS)
13999 pVCpu->iem.s.cInstructions++;
14000 if (pVCpu->iem.s.cActiveMappings > 0)
14001 {
14002 Assert(rcStrict != VINF_SUCCESS);
14003 iemMemRollback(pVCpu);
14004 }
14005 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14006 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14007 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14008
14009//#ifdef DEBUG
14010// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14011//#endif
14012
14013#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14014 /*
14015 * Perform any VMX nested-guest instruction boundary actions.
14016 *
14017 * If any of these causes a VM-exit, we must skip executing the next
14018 * instruction (would run into stale page tables). A VM-exit makes sure
14019 * there is no interrupt-inhibition, so that should ensure we don't try
14020 * to execute the next instruction. Clearing fExecuteInhibit is
14021 * problematic because of the setjmp/longjmp clobbering above.
14022 */
14023 if ( rcStrict == VINF_SUCCESS
14024 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14025 {
14026 /* TPR-below threshold/APIC write has the highest priority. */
14027 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14028 {
14029 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14030 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14031 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14032 }
14033 /* MTF takes priority over VMX-preemption timer. */
14034 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
14035 {
14036 rcStrict = iemVmxVmexitMtf(pVCpu);
14037 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14038 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14039 }
14040 /* Finally, check if the VMX preemption timer has expired. */
14041 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14042 {
14043 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14044 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
14045 rcStrict = VINF_SUCCESS;
14046 else
14047 {
14048 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14049 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14050 }
14051 }
14052 }
14053#endif
14054
14055 /* Execute the next instruction as well if a cli, pop ss or
14056 mov ss, Gr has just completed successfully. */
14057 if ( fExecuteInhibit
14058 && rcStrict == VINF_SUCCESS
14059 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14060 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14061 {
14062 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14063 if (rcStrict == VINF_SUCCESS)
14064 {
14065#ifdef LOG_ENABLED
14066 iemLogCurInstr(pVCpu, false, pszFunction);
14067#endif
14068#ifdef IEM_WITH_SETJMP
14069 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14070 if ((rcStrict = setjmp(JmpBuf)) == 0)
14071 {
14072 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14073 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14074 }
14075 else
14076 pVCpu->iem.s.cLongJumps++;
14077 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14078#else
14079 IEM_OPCODE_GET_NEXT_U8(&b);
14080 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14081#endif
14082 if (rcStrict == VINF_SUCCESS)
14083 pVCpu->iem.s.cInstructions++;
14084 if (pVCpu->iem.s.cActiveMappings > 0)
14085 {
14086 Assert(rcStrict != VINF_SUCCESS);
14087 iemMemRollback(pVCpu);
14088 }
14089 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14090 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14091 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14092 }
14093 else if (pVCpu->iem.s.cActiveMappings > 0)
14094 iemMemRollback(pVCpu);
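 /* (Disarm the interrupt-inhibition shadow by recording a PC that no real RIP
    will match; the magic constant is presumably just a recognizable marker.) */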
14095 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14096 }
14097
14098 /*
14099 * Return value fiddling, statistics and sanity assertions.
14100 */
14101 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14102
14103 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14104 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14105 return rcStrict;
14106}
14107
14108
14109#ifdef IN_RC
14110/**
14111 * Re-enters raw-mode or ensures we return to ring-3.
14112 *
14113 * @returns rcStrict, maybe modified.
14114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14115 * @param rcStrict The status code returned by the interpreter.
14116 */
14117DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14118{
14119 if ( !pVCpu->iem.s.fInPatchCode
14120 && ( rcStrict == VINF_SUCCESS
14121 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14122 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14123 {
14124 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14125 CPUMRawEnter(pVCpu);
14126 else
14127 {
14128 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14129 rcStrict = VINF_EM_RESCHEDULE;
14130 }
14131 }
14132 return rcStrict;
14133}
14134#endif
14135
14136
14137/**
14138 * Execute one instruction.
14139 *
14140 * @return Strict VBox status code.
14141 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14142 */
14143VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14144{
14145#ifdef LOG_ENABLED
14146 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14147#endif
14148
14149 /*
14150 * Do the decoding and emulation.
14151 */
14152 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14153 if (rcStrict == VINF_SUCCESS)
14154 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14155 else if (pVCpu->iem.s.cActiveMappings > 0)
14156 iemMemRollback(pVCpu);
14157
14158#ifdef IN_RC
14159 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14160#endif
14161 if (rcStrict != VINF_SUCCESS)
14162 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14163 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14164 return rcStrict;
14165}
14166
14167
14168VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14169{
14170 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14171
14172 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14173 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14174 if (rcStrict == VINF_SUCCESS)
14175 {
14176 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14177 if (pcbWritten)
14178 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14179 }
14180 else if (pVCpu->iem.s.cActiveMappings > 0)
14181 iemMemRollback(pVCpu);
14182
14183#ifdef IN_RC
14184 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14185#endif
14186 return rcStrict;
14187}
14188
14189
14190VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14191 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14192{
14193 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14194
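 /* (If the caller prefetched opcode bytes for the current RIP, feed them
    straight to the decoder and skip the guest-memory prefetch.) */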
14195 VBOXSTRICTRC rcStrict;
14196 if ( cbOpcodeBytes
14197 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14198 {
14199 iemInitDecoder(pVCpu, false);
14200#ifdef IEM_WITH_CODE_TLB
14201 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14202 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14203 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14204 pVCpu->iem.s.offCurInstrStart = 0;
14205 pVCpu->iem.s.offInstrNextByte = 0;
14206#else
14207 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14208 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14209#endif
14210 rcStrict = VINF_SUCCESS;
14211 }
14212 else
14213 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14214 if (rcStrict == VINF_SUCCESS)
14215 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14216 else if (pVCpu->iem.s.cActiveMappings > 0)
14217 iemMemRollback(pVCpu);
14218
14219#ifdef IN_RC
14220 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14221#endif
14222 return rcStrict;
14223}
14224
14225
14226VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14227{
14228 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14229
14230 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14231 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14232 if (rcStrict == VINF_SUCCESS)
14233 {
14234 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14235 if (pcbWritten)
14236 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14237 }
14238 else if (pVCpu->iem.s.cActiveMappings > 0)
14239 iemMemRollback(pVCpu);
14240
14241#ifdef IN_RC
14242 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14243#endif
14244 return rcStrict;
14245}
14246
14247
14248VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14249 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14250{
14251 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14252
14253 VBOXSTRICTRC rcStrict;
14254 if ( cbOpcodeBytes
14255 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14256 {
14257 iemInitDecoder(pVCpu, true);
14258#ifdef IEM_WITH_CODE_TLB
14259 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14260 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14261 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14262 pVCpu->iem.s.offCurInstrStart = 0;
14263 pVCpu->iem.s.offInstrNextByte = 0;
14264#else
14265 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14266 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14267#endif
14268 rcStrict = VINF_SUCCESS;
14269 }
14270 else
14271 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14272 if (rcStrict == VINF_SUCCESS)
14273 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14274 else if (pVCpu->iem.s.cActiveMappings > 0)
14275 iemMemRollback(pVCpu);
14276
14277#ifdef IN_RC
14278 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14279#endif
14280 return rcStrict;
14281}
14282
14283
14284/**
14285 * For debugging DISGetParamSize; may come in handy.
14286 *
14287 * @returns Strict VBox status code.
14288 * @param pVCpu The cross context virtual CPU structure of the
14289 * calling EMT.
14290 * @param pCtxCore The context core structure.
14291 * @param OpcodeBytesPC The PC of the opcode bytes.
14292 * @param pvOpcodeBytes Prefetched opcode bytes.
14293 * @param cbOpcodeBytes Number of prefetched bytes.
14294 * @param pcbWritten Where to return the number of bytes written.
14295 * Optional.
14296 */
14297VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14298 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14299 uint32_t *pcbWritten)
14300{
14301 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14302
14303 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14304 VBOXSTRICTRC rcStrict;
14305 if ( cbOpcodeBytes
14306 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14307 {
14308 iemInitDecoder(pVCpu, true);
14309#ifdef IEM_WITH_CODE_TLB
14310 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14311 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14312 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14313 pVCpu->iem.s.offCurInstrStart = 0;
14314 pVCpu->iem.s.offInstrNextByte = 0;
14315#else
14316 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14317 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14318#endif
14319 rcStrict = VINF_SUCCESS;
14320 }
14321 else
14322 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14323 if (rcStrict == VINF_SUCCESS)
14324 {
14325 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14326 if (pcbWritten)
14327 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14328 }
14329 else if (pVCpu->iem.s.cActiveMappings > 0)
14330 iemMemRollback(pVCpu);
14331
14332#ifdef IN_RC
14333 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14334#endif
14335 return rcStrict;
14336}
14337
14338
14339VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14340{
14341 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14342 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14343
14344 /*
14345 * See if there is an interrupt pending in TRPM, inject it if we can.
14346 */
14347 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14348#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14349 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14350 if (fIntrEnabled)
14351 {
14352 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14353 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14354 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14355 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14356 else
14357 {
14358 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14359 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14360 }
14361 }
14362#else
14363 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14364#endif
14365 if ( fIntrEnabled
14366 && TRPMHasTrap(pVCpu)
14367 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14368 {
14369 uint8_t u8TrapNo;
14370 TRPMEVENT enmType;
14371 RTGCUINT uErrCode;
14372 RTGCPTR uCr2;
14373 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14374 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14375 TRPMResetTrap(pVCpu);
14376#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14377 /* Injecting an event may cause a VM-exit. */
14378 if ( rcStrict != VINF_SUCCESS
14379 && rcStrict != VINF_IEM_RAISED_XCPT)
14380 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14381#else
14382 NOREF(rcStrict);
14383#endif
14384 }
14385
14386 /*
14387 * Initial decoder init w/ prefetch, then setup setjmp.
14388 */
14389 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14390 if (rcStrict == VINF_SUCCESS)
14391 {
14392#ifdef IEM_WITH_SETJMP
14393 jmp_buf JmpBuf;
14394 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14395 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14396 pVCpu->iem.s.cActiveMappings = 0;
14397 if ((rcStrict = setjmp(JmpBuf)) == 0)
14398#endif
14399 {
14400 /*
14401 * The run loop. We limit ourselves to the caller-specified instruction count.
14402 */
14403 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14404 PVM pVM = pVCpu->CTX_SUFF(pVM);
14405 for (;;)
14406 {
14407 /*
14408 * Log the state.
14409 */
14410#ifdef LOG_ENABLED
14411 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14412#endif
14413
14414 /*
14415 * Do the decoding and emulation.
14416 */
14417 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14418 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14419 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14420 {
14421 Assert(pVCpu->iem.s.cActiveMappings == 0);
14422 pVCpu->iem.s.cInstructions++;
14423 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14424 {
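 /* (Mask out the force flags that do not require leaving the inner loop; only
    the remaining CPU flags or any VM-wide flag force a return to the caller.) */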
14425 uint64_t fCpu = pVCpu->fLocalForcedActions
14426 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14427 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14428 | VMCPU_FF_TLB_FLUSH
14429#ifdef VBOX_WITH_RAW_MODE
14430 | VMCPU_FF_TRPM_SYNC_IDT
14431 | VMCPU_FF_SELM_SYNC_TSS
14432 | VMCPU_FF_SELM_SYNC_GDT
14433 | VMCPU_FF_SELM_SYNC_LDT
14434#endif
14435 | VMCPU_FF_INHIBIT_INTERRUPTS
14436 | VMCPU_FF_BLOCK_NMIS
14437 | VMCPU_FF_UNHALT ));
14438
14439 if (RT_LIKELY( ( !fCpu
14440 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14441 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14442 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14443 {
14444 if (cMaxInstructionsGccStupidity-- > 0)
14445 {
14446 /* Poll timers every now and then according to the caller's specs. */
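 /* (cPollRate is a power-of-two-minus-one mask, asserted on entry, so timers
    are polled once every cPollRate + 1 instructions; when TMTimerPollBool
    indicates pending timer work we fall out of the loop.) */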
14447 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14448 || !TMTimerPollBool(pVM, pVCpu))
14449 {
14450 Assert(pVCpu->iem.s.cActiveMappings == 0);
14451 iemReInitDecoder(pVCpu);
14452 continue;
14453 }
14454 }
14455 }
14456 }
14457 Assert(pVCpu->iem.s.cActiveMappings == 0);
14458 }
14459 else if (pVCpu->iem.s.cActiveMappings > 0)
14460 iemMemRollback(pVCpu);
14461 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14462 break;
14463 }
14464 }
14465#ifdef IEM_WITH_SETJMP
14466 else
14467 {
14468 if (pVCpu->iem.s.cActiveMappings > 0)
14469 iemMemRollback(pVCpu);
14470# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14471 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14472# endif
14473 pVCpu->iem.s.cLongJumps++;
14474 }
14475 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14476#endif
14477
14478 /*
14479 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14480 */
14481 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14482 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14483 }
14484 else
14485 {
14486 if (pVCpu->iem.s.cActiveMappings > 0)
14487 iemMemRollback(pVCpu);
14488
14489#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14490 /*
14491 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14492 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14493 */
14494 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14495#endif
14496 }
14497
14498 /*
14499 * Maybe re-enter raw-mode and log.
14500 */
14501#ifdef IN_RC
14502 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14503#endif
14504 if (rcStrict != VINF_SUCCESS)
14505 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14506 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14507 if (pcInstructions)
14508 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14509 return rcStrict;
14510}
14511
14512
14513/**
14514 * Interface used by EMExecuteExec; does exit statistics and limits.
14515 *
14516 * @returns Strict VBox status code.
14517 * @param pVCpu The cross context virtual CPU structure.
14518 * @param fWillExit To be defined.
14519 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14520 * @param cMaxInstructions Maximum number of instructions to execute.
14521 * @param cMaxInstructionsWithoutExits
14522 * The max number of instructions without exits.
14523 * @param pStats Where to return statistics.
14524 */
14525VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14526 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14527{
14528 NOREF(fWillExit); /** @todo define flexible exit crits */
14529
14530 /*
14531 * Initialize return stats.
14532 */
14533 pStats->cInstructions = 0;
14534 pStats->cExits = 0;
14535 pStats->cMaxExitDistance = 0;
14536 pStats->cReserved = 0;
14537
14538 /*
14539 * Initial decoder init w/ prefetch, then setup setjmp.
14540 */
14541 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14542 if (rcStrict == VINF_SUCCESS)
14543 {
14544#ifdef IEM_WITH_SETJMP
14545 jmp_buf JmpBuf;
14546 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14547 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14548 pVCpu->iem.s.cActiveMappings = 0;
14549 if ((rcStrict = setjmp(JmpBuf)) == 0)
14550#endif
14551 {
14552#ifdef IN_RING0
14553 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14554#endif
14555 uint32_t cInstructionSinceLastExit = 0;
14556
14557 /*
14558 * The run loop. We limit ourselves to the caller-specified instruction count.
14559 */
14560 PVM pVM = pVCpu->CTX_SUFF(pVM);
14561 for (;;)
14562 {
14563 /*
14564 * Log the state.
14565 */
14566#ifdef LOG_ENABLED
14567 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14568#endif
14569
14570 /*
14571 * Do the decoding and emulation.
14572 */
14573 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14574
14575 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14576 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14577
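 /* (Exit bookkeeping: cPotentialExits is presumably bumped by the emulation
    whenever an instruction does something that would have caused a hardware
    VM-exit; here we count such events and the distance between them.) */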
14578 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14579 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14580 {
14581 pStats->cExits += 1;
14582 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14583 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14584 cInstructionSinceLastExit = 0;
14585 }
14586
14587 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14588 {
14589 Assert(pVCpu->iem.s.cActiveMappings == 0);
14590 pVCpu->iem.s.cInstructions++;
14591 pStats->cInstructions++;
14592 cInstructionSinceLastExit++;
14593 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14594 {
14595 uint64_t fCpu = pVCpu->fLocalForcedActions
14596 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14597 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14598 | VMCPU_FF_TLB_FLUSH
14599#ifdef VBOX_WITH_RAW_MODE
14600 | VMCPU_FF_TRPM_SYNC_IDT
14601 | VMCPU_FF_SELM_SYNC_TSS
14602 | VMCPU_FF_SELM_SYNC_GDT
14603 | VMCPU_FF_SELM_SYNC_LDT
14604#endif
14605 | VMCPU_FF_INHIBIT_INTERRUPTS
14606 | VMCPU_FF_BLOCK_NMIS
14607 | VMCPU_FF_UNHALT ));
14608
14609 if (RT_LIKELY( ( ( !fCpu
14610 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14611 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14612 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14613 || pStats->cInstructions < cMinInstructions))
14614 {
14615 if (pStats->cInstructions < cMaxInstructions)
14616 {
14617 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14618 {
14619#ifdef IN_RING0
14620 if ( !fCheckPreemptionPending
14621 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14622#endif
14623 {
14624 Assert(pVCpu->iem.s.cActiveMappings == 0);
14625 iemReInitDecoder(pVCpu);
14626 continue;
14627 }
14628#ifdef IN_RING0
14629 rcStrict = VINF_EM_RAW_INTERRUPT;
14630 break;
14631#endif
14632 }
14633 }
14634 }
14635 Assert(!(fCpu & VMCPU_FF_IEM));
14636 }
14637 Assert(pVCpu->iem.s.cActiveMappings == 0);
14638 }
14639 else if (pVCpu->iem.s.cActiveMappings > 0)
14640 iemMemRollback(pVCpu);
14641 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14642 break;
14643 }
14644 }
14645#ifdef IEM_WITH_SETJMP
14646 else
14647 {
14648 if (pVCpu->iem.s.cActiveMappings > 0)
14649 iemMemRollback(pVCpu);
14650 pVCpu->iem.s.cLongJumps++;
14651 }
14652 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14653#endif
14654
14655 /*
14656 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14657 */
14658 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14659 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14660 }
14661 else
14662 {
14663 if (pVCpu->iem.s.cActiveMappings > 0)
14664 iemMemRollback(pVCpu);
14665
14666#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14667 /*
14668 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14669 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14670 */
14671 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14672#endif
14673 }
14674
14675 /*
14676 * Maybe re-enter raw-mode and log.
14677 */
14678#ifdef IN_RC
14679 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14680#endif
14681 if (rcStrict != VINF_SUCCESS)
14682 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14683 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14684 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14685 return rcStrict;
14686}
14687
14688
14689/**
14690 * Injects a trap, fault, abort, software interrupt or external interrupt.
14691 *
14692 * The parameter list matches TRPMQueryTrapAll pretty closely.
14693 *
14694 * @returns Strict VBox status code.
14695 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14696 * @param u8TrapNo The trap number.
14697 * @param enmType What type is it (trap/fault/abort), software
14698 * interrupt or hardware interrupt.
14699 * @param uErrCode The error code if applicable.
14700 * @param uCr2 The CR2 value if applicable.
14701 * @param cbInstr The instruction length (only relevant for
14702 * software interrupts).
14703 */
14704VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14705 uint8_t cbInstr)
14706{
14707 iemInitDecoder(pVCpu, false);
14708#ifdef DBGFTRACE_ENABLED
14709 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14710 u8TrapNo, enmType, uErrCode, uCr2);
14711#endif
14712
14713 uint32_t fFlags;
14714 switch (enmType)
14715 {
14716 case TRPM_HARDWARE_INT:
14717 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14718 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14719 uErrCode = uCr2 = 0;
14720 break;
14721
14722 case TRPM_SOFTWARE_INT:
14723 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14724 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14725 uErrCode = uCr2 = 0;
14726 break;
14727
14728 case TRPM_TRAP:
14729 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14730 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14731 if (u8TrapNo == X86_XCPT_PF)
14732 fFlags |= IEM_XCPT_FLAGS_CR2;
14733 switch (u8TrapNo)
14734 {
14735 case X86_XCPT_DF:
14736 case X86_XCPT_TS:
14737 case X86_XCPT_NP:
14738 case X86_XCPT_SS:
14739 case X86_XCPT_PF:
14740 case X86_XCPT_AC:
14741 fFlags |= IEM_XCPT_FLAGS_ERR;
14742 break;
14743
14744 case X86_XCPT_NMI:
14745 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14746 break;
14747 }
14748 break;
14749
14750 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14751 }
14752
14753 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14754
14755 if (pVCpu->iem.s.cActiveMappings > 0)
14756 iemMemRollback(pVCpu);
14757
14758 return rcStrict;
14759}
14760
14761
14762/**
14763 * Injects the active TRPM event.
14764 *
14765 * @returns Strict VBox status code.
14766 * @param pVCpu The cross context virtual CPU structure.
14767 */
14768VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14769{
14770#ifndef IEM_IMPLEMENTS_TASKSWITCH
14771 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14772#else
14773 uint8_t u8TrapNo;
14774 TRPMEVENT enmType;
14775 RTGCUINT uErrCode;
14776 RTGCUINTPTR uCr2;
14777 uint8_t cbInstr;
14778 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14779 if (RT_FAILURE(rc))
14780 return rc;
14781
14782 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14783#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14784 if (rcStrict == VINF_SVM_VMEXIT)
14785 rcStrict = VINF_SUCCESS;
14786#endif
14787#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14788 if (rcStrict == VINF_VMX_VMEXIT)
14789 rcStrict = VINF_SUCCESS;
14790#endif
14791 /** @todo Are there any other codes that imply the event was successfully
14792 * delivered to the guest? See @bugref{6607}. */
14793 if ( rcStrict == VINF_SUCCESS
14794 || rcStrict == VINF_IEM_RAISED_XCPT)
14795 TRPMResetTrap(pVCpu);
14796
14797 return rcStrict;
14798#endif
14799}
14800
14801
14802VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14803{
14804 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14805 return VERR_NOT_IMPLEMENTED;
14806}
14807
14808
14809VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14810{
14811 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14812 return VERR_NOT_IMPLEMENTED;
14813}
14814
14815
14816#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14817/**
14818 * Executes an IRET instruction with the default operand size.
14819 *
14820 * This is for PATM.
14821 *
14822 * @returns VBox status code.
14823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14824 * @param pCtxCore The register frame.
14825 */
14826VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14827{
14828 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14829
14830 iemCtxCoreToCtx(pCtx, pCtxCore);
14831 iemInitDecoder(pVCpu);
14832 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14833 if (rcStrict == VINF_SUCCESS)
14834 iemCtxToCtxCore(pCtxCore, pCtx);
14835 else
14836 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14837 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14838 return rcStrict;
14839}
14840#endif
14841
14842
14843/**
14844 * Macro used by the IEMExec* methods to check the given instruction length.
14845 *
14846 * Will return on failure!
14847 *
14848 * @param a_cbInstr The given instruction length.
14849 * @param a_cbMin The minimum length.
14850 */
14851#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14852 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14853 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
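
/* Note on the check above: doing the subtraction in unsigned arithmetic folds the
   lower and upper bound tests into a single compare.  With a_cbMin=2, an a_cbInstr
   of 1 wraps around to UINT_MAX and fails "<= 13", 16 yields 14 and fails as well,
   while anything in the valid range 2..15 (15 being the architectural maximum
   x86 instruction length) passes. */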
14854
14855
14856/**
14857 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14858 *
14859 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14860 *
14861 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14863 * @param rcStrict The status code to fiddle.
14864 */
14865DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14866{
14867 iemUninitExec(pVCpu);
14868#ifdef IN_RC
14869 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14870#else
14871 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14872#endif
14873}
14874
14875
14876/**
14877 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14878 *
14879 * This API ASSUMES that the caller has already verified that the guest code is
14880 * allowed to access the I/O port. (The I/O port is in the DX register in the
14881 * guest state.)
14882 *
14883 * @returns Strict VBox status code.
14884 * @param pVCpu The cross context virtual CPU structure.
14885 * @param cbValue The size of the I/O port access (1, 2, or 4).
14886 * @param enmAddrMode The addressing mode.
14887 * @param fRepPrefix Indicates whether a repeat prefix is used
14888 * (doesn't matter which for this instruction).
14889 * @param cbInstr The instruction length in bytes.
14890 * @param iEffSeg The effective segment address.
14891 * @param fIoChecked Whether the access to the I/O port has been
14892 * checked or not. It's typically checked in the
14893 * HM scenario.
14894 */
14895VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14896 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14897{
14898 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14899 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14900
14901 /*
14902 * State init.
14903 */
14904 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14905
14906 /*
14907 * Switch orgy for getting to the right handler.
14908 */
14909 VBOXSTRICTRC rcStrict;
14910 if (fRepPrefix)
14911 {
14912 switch (enmAddrMode)
14913 {
14914 case IEMMODE_16BIT:
14915 switch (cbValue)
14916 {
14917 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14918 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14919 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14920 default:
14921 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14922 }
14923 break;
14924
14925 case IEMMODE_32BIT:
14926 switch (cbValue)
14927 {
14928 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14929 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14930 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14931 default:
14932 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14933 }
14934 break;
14935
14936 case IEMMODE_64BIT:
14937 switch (cbValue)
14938 {
14939 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14940 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14941 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14942 default:
14943 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14944 }
14945 break;
14946
14947 default:
14948 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14949 }
14950 }
14951 else
14952 {
14953 switch (enmAddrMode)
14954 {
14955 case IEMMODE_16BIT:
14956 switch (cbValue)
14957 {
14958 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14959 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14960 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14961 default:
14962 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14963 }
14964 break;
14965
14966 case IEMMODE_32BIT:
14967 switch (cbValue)
14968 {
14969 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14970 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14971 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14972 default:
14973 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14974 }
14975 break;
14976
14977 case IEMMODE_64BIT:
14978 switch (cbValue)
14979 {
14980 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14981 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14982 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14983 default:
14984 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14985 }
14986 break;
14987
14988 default:
14989 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14990 }
14991 }
14992
14993 if (pVCpu->iem.s.cActiveMappings)
14994 iemMemRollback(pVCpu);
14995
14996 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14997}
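
/*
 * Illustrative usage sketch, not taken from the sources: how an exit handler
 * might forward a REP OUTSB it has already decoded to IEMExecStringIoWrite
 * above.  It assumes the caller has determined the operand size, address size,
 * segment, REP prefix and instruction length, and has already checked the I/O
 * permission bitmap (hence fIoChecked = true); the local names are made up for
 * the example.
 *
 * @code
 *      uint8_t const cbValue    = 1;                // OUTSB moves bytes.
 *      IEMMODE const enmAddr    = IEMMODE_32BIT;    // 32-bit address size.
 *      bool const    fRep       = true;             // REP prefix present.
 *      uint8_t const cbInstr    = 2;                // 0xf3 0x6e = REP OUTSB.
 *      uint8_t const iEffSeg    = X86_SREG_DS;      // No segment override.
 *      bool const    fIoChecked = true;             // Permissions verified by the caller.
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddr, fRep,
 *                                                   cbInstr, iEffSeg, fIoChecked);
 * @endcode
 */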
14998
14999
15000/**
15001 * Interface for HM and EM for executing string I/O IN (read) instructions.
15002 *
15003 * This API ASSUMES that the caller has already verified that the guest code is
15004 * allowed to access the I/O port. (The I/O port is in the DX register in the
15005 * guest state.)
15006 *
15007 * @returns Strict VBox status code.
15008 * @param pVCpu The cross context virtual CPU structure.
15009 * @param cbValue The size of the I/O port access (1, 2, or 4).
15010 * @param enmAddrMode The addressing mode.
15011 * @param fRepPrefix Indicates whether a repeat prefix is used
15012 * (doesn't matter which for this instruction).
15013 * @param cbInstr The instruction length in bytes.
15014 * @param fIoChecked Whether the access to the I/O port has been
15015 * checked or not. It's typically checked in the
15016 * HM scenario.
15017 */
15018VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15019 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15020{
15021 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15022
15023 /*
15024 * State init.
15025 */
15026 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15027
15028 /*
15029 * Switch orgy for getting to the right handler.
15030 */
15031 VBOXSTRICTRC rcStrict;
15032 if (fRepPrefix)
15033 {
15034 switch (enmAddrMode)
15035 {
15036 case IEMMODE_16BIT:
15037 switch (cbValue)
15038 {
15039 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15040 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15041 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15042 default:
15043 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15044 }
15045 break;
15046
15047 case IEMMODE_32BIT:
15048 switch (cbValue)
15049 {
15050 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15051 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15052 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15053 default:
15054 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15055 }
15056 break;
15057
15058 case IEMMODE_64BIT:
15059 switch (cbValue)
15060 {
15061 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15062 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15063 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15064 default:
15065 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15066 }
15067 break;
15068
15069 default:
15070 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15071 }
15072 }
15073 else
15074 {
15075 switch (enmAddrMode)
15076 {
15077 case IEMMODE_16BIT:
15078 switch (cbValue)
15079 {
15080 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15081 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15082 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15083 default:
15084 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15085 }
15086 break;
15087
15088 case IEMMODE_32BIT:
15089 switch (cbValue)
15090 {
15091 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15092 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15093 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15094 default:
15095 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15096 }
15097 break;
15098
15099 case IEMMODE_64BIT:
15100 switch (cbValue)
15101 {
15102 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15103 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15104 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15105 default:
15106 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15107 }
15108 break;
15109
15110 default:
15111 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15112 }
15113 }
15114
15115 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15116 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15117}
15118
15119
15120/**
15121 * Interface for raw-mode to execute an OUT instruction.
15122 *
15123 * @returns Strict VBox status code.
15124 * @param pVCpu The cross context virtual CPU structure.
15125 * @param cbInstr The instruction length in bytes.
15126 * @param u16Port The port to write to.
15127 * @param fImm Whether the port is specified using an immediate operand or
15128 * using the implicit DX register.
15129 * @param cbReg The register size.
15130 *
15131 * @remarks In ring-0 not all of the state needs to be synced in.
15132 */
15133VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15134{
15135 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15136 Assert(cbReg <= 4 && cbReg != 3);
15137
15138 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15139 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15140 Assert(!pVCpu->iem.s.cActiveMappings);
15141 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15142}
15143
15144
15145/**
15146 * Interface for raw-mode to execute an IN instruction.
15147 *
15148 * @returns Strict VBox status code.
15149 * @param pVCpu The cross context virtual CPU structure.
15150 * @param cbInstr The instruction length in bytes.
15151 * @param u16Port The port to read.
15152 * @param fImm Whether the port is specified using an immediate operand or
15153 * using the implicit DX.
15154 * @param cbReg The register size.
15155 */
15156VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15157{
15158 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15159 Assert(cbReg <= 4 && cbReg != 3);
15160
15161 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15162 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15163 Assert(!pVCpu->iem.s.cActiveMappings);
15164 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15165}
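
/*
 * Illustrative usage sketch, not taken from the sources: forwarding already
 * decoded port accesses to IEMExecDecodedOut/IEMExecDecodedIn above.  The
 * u16Port local is assumed to come from the caller's exit decoding.
 *
 * @code
 *      // OUT DX, AL: opcode 0xee, so cbInstr = 1; the port comes from DX
 *      // rather than an immediate operand, so fImm = false; one byte wide.
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1, u16Port, false, 1);
 *
 *      // IN EAX, imm8: opcode 0xe5 ib, so cbInstr = 2, fImm = true, four bytes wide.
 *      rcStrict = IEMExecDecodedIn(pVCpu, 2, u16Port, true, 4);
 * @endcode
 */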
15166
15167
15168/**
15169 * Interface for HM and EM to write to a CRx register.
15170 *
15171 * @returns Strict VBox status code.
15172 * @param pVCpu The cross context virtual CPU structure.
15173 * @param cbInstr The instruction length in bytes.
15174 * @param iCrReg The control register number (destination).
15175 * @param iGReg The general purpose register number (source).
15176 *
15177 * @remarks In ring-0 not all of the state needs to be synced in.
15178 */
15179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15180{
15181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15182 Assert(iCrReg < 16);
15183 Assert(iGReg < 16);
15184
15185 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15187 Assert(!pVCpu->iem.s.cActiveMappings);
15188 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15189}
15190
15191
15192/**
15193 * Interface for HM and EM to read from a CRx register.
15194 *
15195 * @returns Strict VBox status code.
15196 * @param pVCpu The cross context virtual CPU structure.
15197 * @param cbInstr The instruction length in bytes.
15198 * @param iGReg The general purpose register number (destination).
15199 * @param iCrReg The control register number (source).
15200 *
15201 * @remarks In ring-0 not all of the state needs to be synced in.
15202 */
15203VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15204{
15205 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15206 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15207 | CPUMCTX_EXTRN_APIC_TPR);
15208 Assert(iCrReg < 16);
15209 Assert(iGReg < 16);
15210
15211 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15212 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15213 Assert(!pVCpu->iem.s.cActiveMappings);
15214 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15215}
15216
15217
15218/**
15219 * Interface for HM and EM to clear the CR0[TS] bit.
15220 *
15221 * @returns Strict VBox status code.
15222 * @param pVCpu The cross context virtual CPU structure.
15223 * @param cbInstr The instruction length in bytes.
15224 *
15225 * @remarks In ring-0 not all of the state needs to be synced in.
15226 */
15227VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15228{
15229 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15230
15231 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15232 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15233 Assert(!pVCpu->iem.s.cActiveMappings);
15234 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15235}
15236
15237
15238/**
15239 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15240 *
15241 * @returns Strict VBox status code.
15242 * @param pVCpu The cross context virtual CPU structure.
15243 * @param cbInstr The instruction length in bytes.
15244 * @param uValue The value to load into CR0.
15245 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15246 * memory operand. Otherwise pass NIL_RTGCPTR.
15247 *
15248 * @remarks In ring-0 not all of the state needs to be synced in.
15249 */
15250VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15251{
15252 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15253
15254 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15255 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15256 Assert(!pVCpu->iem.s.cActiveMappings);
15257 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15258}
15259
15260
15261/**
15262 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15263 *
15264 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15265 *
15266 * @returns Strict VBox status code.
15267 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15268 * @param cbInstr The instruction length in bytes.
15269 * @remarks In ring-0 not all of the state needs to be synced in.
15270 * @thread EMT(pVCpu)
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15275
15276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15277 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15278 Assert(!pVCpu->iem.s.cActiveMappings);
15279 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15280}
15281
15282
15283/**
15284 * Interface for HM and EM to emulate the WBINVD instruction.
15285 *
15286 * @returns Strict VBox status code.
15287 * @param pVCpu The cross context virtual CPU structure.
15288 * @param cbInstr The instruction length in bytes.
15289 *
15290 * @remarks In ring-0 not all of the state needs to be synced in.
15291 */
15292VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15293{
15294 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15295
15296 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15297 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15298 Assert(!pVCpu->iem.s.cActiveMappings);
15299 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15300}
15301
15302
15303/**
15304 * Interface for HM and EM to emulate the INVD instruction.
15305 *
15306 * @returns Strict VBox status code.
15307 * @param pVCpu The cross context virtual CPU structure.
15308 * @param cbInstr The instruction length in bytes.
15309 *
15310 * @remarks In ring-0 not all of the state needs to be synced in.
15311 */
15312VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15313{
15314 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15315
15316 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15317 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15318 Assert(!pVCpu->iem.s.cActiveMappings);
15319 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15320}
15321
15322
15323/**
15324 * Interface for HM and EM to emulate the INVLPG instruction.
15325 *
15326 * @returns Strict VBox status code.
15327 * @retval VINF_PGM_SYNC_CR3
15328 *
15329 * @param pVCpu The cross context virtual CPU structure.
15330 * @param cbInstr The instruction length in bytes.
15331 * @param GCPtrPage The effective address of the page to invalidate.
15332 *
15333 * @remarks In ring-0 not all of the state needs to be synced in.
15334 */
15335VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15336{
15337 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15338
15339 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15341 Assert(!pVCpu->iem.s.cActiveMappings);
15342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15343}
15344
15345
15346/**
15347 * Interface for HM and EM to emulate the CPUID instruction.
15348 *
15349 * @returns Strict VBox status code.
15350 *
15351 * @param pVCpu The cross context virtual CPU structure.
15352 * @param cbInstr The instruction length in bytes.
15353 *
15354 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15355 */
15356VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15357{
15358 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15359 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15360
15361 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15362 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15363 Assert(!pVCpu->iem.s.cActiveMappings);
15364 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15365}
15366
15367
15368/**
15369 * Interface for HM and EM to emulate the RDPMC instruction.
15370 *
15371 * @returns Strict VBox status code.
15372 *
15373 * @param pVCpu The cross context virtual CPU structure.
15374 * @param cbInstr The instruction length in bytes.
15375 *
15376 * @remarks Not all of the state needs to be synced in.
15377 */
15378VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15379{
15380 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15381 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15382
15383 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15384 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15385 Assert(!pVCpu->iem.s.cActiveMappings);
15386 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15387}
15388
15389
15390/**
15391 * Interface for HM and EM to emulate the RDTSC instruction.
15392 *
15393 * @returns Strict VBox status code.
15394 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15395 *
15396 * @param pVCpu The cross context virtual CPU structure.
15397 * @param cbInstr The instruction length in bytes.
15398 *
15399 * @remarks Not all of the state needs to be synced in.
15400 */
15401VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15402{
15403 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15404 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15405
15406 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15408 Assert(!pVCpu->iem.s.cActiveMappings);
15409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15410}
15411
15412
15413/**
15414 * Interface for HM and EM to emulate the RDTSCP instruction.
15415 *
15416 * @returns Strict VBox status code.
15417 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15418 *
15419 * @param pVCpu The cross context virtual CPU structure.
15420 * @param cbInstr The instruction length in bytes.
15421 *
15422 * @remarks Not all of the state needs to be synced in. It is recommended
15423 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15424 */
15425VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15426{
15427 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15428 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15429
15430 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15431 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15432 Assert(!pVCpu->iem.s.cActiveMappings);
15433 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15434}
15435
15436
15437/**
15438 * Interface for HM and EM to emulate the RDMSR instruction.
15439 *
15440 * @returns Strict VBox status code.
15441 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15442 *
15443 * @param pVCpu The cross context virtual CPU structure.
15444 * @param cbInstr The instruction length in bytes.
15445 *
15446 * @remarks Not all of the state needs to be synced in. Requires RCX and
15447 * (currently) all MSRs.
15448 */
15449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15450{
15451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15452 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15453
15454 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15456 Assert(!pVCpu->iem.s.cActiveMappings);
15457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15458}
15459
15460
15461/**
15462 * Interface for HM and EM to emulate the WRMSR instruction.
15463 *
15464 * @returns Strict VBox status code.
15465 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15466 *
15467 * @param pVCpu The cross context virtual CPU structure.
15468 * @param cbInstr The instruction length in bytes.
15469 *
15470 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15471 * and (currently) all MSRs.
15472 */
15473VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15474{
15475 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15476 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15477 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15478
15479 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15480 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15481 Assert(!pVCpu->iem.s.cActiveMappings);
15482 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15483}
15484
15485
15486/**
15487 * Interface for HM and EM to emulate the MONITOR instruction.
15488 *
15489 * @returns Strict VBox status code.
15490 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15491 *
15492 * @param pVCpu The cross context virtual CPU structure.
15493 * @param cbInstr The instruction length in bytes.
15494 *
15495 * @remarks Not all of the state needs to be synced in.
15496 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15497 * are used.
15498 */
15499VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15500{
15501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15502 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15503
15504 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15505 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15506 Assert(!pVCpu->iem.s.cActiveMappings);
15507 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15508}
15509
15510
15511/**
15512 * Interface for HM and EM to emulate the MWAIT instruction.
15513 *
15514 * @returns Strict VBox status code.
15515 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15516 *
15517 * @param pVCpu The cross context virtual CPU structure.
15518 * @param cbInstr The instruction length in bytes.
15519 *
15520 * @remarks Not all of the state needs to be synced in.
15521 */
15522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15523{
15524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15525
15526 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15527 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15528 Assert(!pVCpu->iem.s.cActiveMappings);
15529 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15530}
15531
15532
15533/**
15534 * Interface for HM and EM to emulate the HLT instruction.
15535 *
15536 * @returns Strict VBox status code.
15537 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15538 *
15539 * @param pVCpu The cross context virtual CPU structure.
15540 * @param cbInstr The instruction length in bytes.
15541 *
15542 * @remarks Not all of the state needs to be synced in.
15543 */
15544VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15545{
15546 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15547
15548 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15550 Assert(!pVCpu->iem.s.cActiveMappings);
15551 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15552}
15553
15554
15555/**
15556 * Checks if IEM is in the process of delivering an event (interrupt or
15557 * exception).
15558 *
15559 * @returns true if we're in the process of raising an interrupt or exception,
15560 * false otherwise.
15561 * @param pVCpu The cross context virtual CPU structure.
15562 * @param puVector Where to store the vector associated with the
15563 * currently delivered event, optional.
15564 * @param pfFlags Where to store the event delivery flags (see
15565 * IEM_XCPT_FLAGS_XXX), optional.
15566 * @param puErr Where to store the error code associated with the
15567 * event, optional.
15568 * @param puCr2 Where to store the CR2 associated with the event,
15569 * optional.
15570 * @remarks The caller should check the flags to determine if the error code and
15571 * CR2 are valid for the event.
15572 */
15573VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15574{
15575 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15576 if (fRaisingXcpt)
15577 {
15578 if (puVector)
15579 *puVector = pVCpu->iem.s.uCurXcpt;
15580 if (pfFlags)
15581 *pfFlags = pVCpu->iem.s.fCurXcpt;
15582 if (puErr)
15583 *puErr = pVCpu->iem.s.uCurXcptErr;
15584 if (puCr2)
15585 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15586 }
15587 return fRaisingXcpt;
15588}
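
/*
 * Illustrative usage sketch, not taken from the sources: querying the event
 * IEM is in the middle of delivering, e.g. from an exit handler that needs to
 * record pending event information.  The local names are made up; the flag
 * checks follow the IEM_XCPT_FLAGS_XXX convention referenced above.
 *
 * @code
 *      uint8_t  uVector;
 *      uint32_t fFlags;
 *      uint32_t uErrCode;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *      {
 *          bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *          bool const fCr2Valid     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *          Log(("Delivering vector %#x (err code valid=%d, CR2 valid=%d)\n",
 *               uVector, fErrCodeValid, fCr2Valid));
 *      }
 * @endcode
 */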
15589
15590#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15591
15592/**
15593 * Interface for HM and EM to emulate the CLGI instruction.
15594 *
15595 * @returns Strict VBox status code.
15596 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15597 * @param cbInstr The instruction length in bytes.
15598 * @thread EMT(pVCpu)
15599 */
15600VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15601{
15602 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15603
15604 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15605 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15606 Assert(!pVCpu->iem.s.cActiveMappings);
15607 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15608}
15609
15610
15611/**
15612 * Interface for HM and EM to emulate the STGI instruction.
15613 *
15614 * @returns Strict VBox status code.
15615 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15616 * @param cbInstr The instruction length in bytes.
15617 * @thread EMT(pVCpu)
15618 */
15619VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15620{
15621 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15622
15623 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15624 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15625 Assert(!pVCpu->iem.s.cActiveMappings);
15626 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15627}
15628
15629
15630/**
15631 * Interface for HM and EM to emulate the VMLOAD instruction.
15632 *
15633 * @returns Strict VBox status code.
15634 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15635 * @param cbInstr The instruction length in bytes.
15636 * @thread EMT(pVCpu)
15637 */
15638VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15639{
15640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15641
15642 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15643 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15644 Assert(!pVCpu->iem.s.cActiveMappings);
15645 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15646}
15647
15648
15649/**
15650 * Interface for HM and EM to emulate the VMSAVE instruction.
15651 *
15652 * @returns Strict VBox status code.
15653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15654 * @param cbInstr The instruction length in bytes.
15655 * @thread EMT(pVCpu)
15656 */
15657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15658{
15659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15660
15661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15663 Assert(!pVCpu->iem.s.cActiveMappings);
15664 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15665}
15666
15667
15668/**
15669 * Interface for HM and EM to emulate the INVLPGA instruction.
15670 *
15671 * @returns Strict VBox status code.
15672 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15673 * @param cbInstr The instruction length in bytes.
15674 * @thread EMT(pVCpu)
15675 */
15676VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15677{
15678 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15679
15680 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15681 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15682 Assert(!pVCpu->iem.s.cActiveMappings);
15683 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15684}
15685
15686
15687/**
15688 * Interface for HM and EM to emulate the VMRUN instruction.
15689 *
15690 * @returns Strict VBox status code.
15691 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15692 * @param cbInstr The instruction length in bytes.
15693 * @thread EMT(pVCpu)
15694 */
15695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15696{
15697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15698 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15699
15700 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15701 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15702 Assert(!pVCpu->iem.s.cActiveMappings);
15703 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15704}
15705
15706
15707/**
15708 * Interface for HM and EM to emulate \#VMEXIT.
15709 *
15710 * @returns Strict VBox status code.
15711 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15712 * @param uExitCode The exit code.
15713 * @param uExitInfo1 The exit info. 1 field.
15714 * @param uExitInfo2 The exit info. 2 field.
15715 * @thread EMT(pVCpu)
15716 */
15717VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15718{
15719 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15720 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15721 if (pVCpu->iem.s.cActiveMappings)
15722 iemMemRollback(pVCpu);
15723 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15724}
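
/*
 * Illustrative usage sketch, not taken from the sources: an intercept handler
 * handing a nested-guest \#VMEXIT to IEMExecSvmVmexit above.  SVM_EXIT_IOIO and
 * the meaning of the two exit info fields follow the AMD architecture (exit
 * info 1 carries the IOIO information, exit info 2 the next RIP); the decoded
 * values themselves are placeholders for whatever the caller computed.
 *
 * @code
 *      uint64_t const uExitInfo1 = uDecodedIoExitInfo;  // IOIO exit information.
 *      uint64_t const uExitInfo2 = uNextRip;            // RIP of the following instruction.
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_IOIO, uExitInfo1, uExitInfo2);
 * @endcode
 */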
15725
15726#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15727
15728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15729
15730/**
15731 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15732 *
15733 * @returns Strict VBox status code.
15734 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15735 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15736 * the x2APIC device.
15737 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15738 *
15739 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15740 * @param idMsr The MSR being read.
15741 * @param pu64Value Pointer to the value being written or where to store the
15742 * value being read.
15743 * @param fWrite Whether this is an MSR write or read access.
15744 * @thread EMT(pVCpu)
15745 */
15746VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15747{
15748 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
15749 Assert(pu64Value);
15750
15751 VBOXSTRICTRC rcStrict;
15752 if (!fWrite)
15753 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15754 else
15755 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15756 if (pVCpu->iem.s.cActiveMappings)
15757 iemMemRollback(pVCpu);
15758 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15759
15760}
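
/*
 * Illustrative usage sketch, not taken from the sources: dispatching on the
 * status codes documented above when virtualizing an x2APIC MSR read.  The
 * idMsr value is assumed to come from the caller's exit decoding.
 *
 * @code
 *      uint64_t u64Value = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &u64Value, false);
 *      if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
 *      {
 *          // u64Value now holds the virtualized MSR value.
 *      }
 *      else if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
 *      {
 *          // Not virtualized; let the regular x2APIC device service the access.
 *      }
 *      else if (RT_FAILURE(rcStrict))
 *      {
 *          // Out-of-range access; raise #GP(0) in the guest.
 *      }
 * @endcode
 */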
15761
15762
15763/**
15764 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15765 *
15766 * @returns Strict VBox status code.
15767 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15768 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15769 *
15770 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15771 * @param offAccess The offset of the register being accessed (within the
15772 * APIC-access page).
15773 * @param cbAccess The size of the access in bytes.
15774 * @param pvData Pointer to the data being written or where to store the data
15775 * being read.
15776 * @param fWrite Whether this is a write or read access.
15777 * @thread EMT(pVCpu)
15778 */
15779VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData,
15780 bool fWrite)
15781{
15782 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15783 Assert(pvData);
15784
15785 /** @todo NSTVMX: Unfortunately, the caller has no idea about instruction fetch
15786 * accesses, so we only use read/write here. Maybe in the future the PGM
15787 * physical handler will be extended to include this information? */
15788 uint32_t const fAccess = fWrite ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
15789 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbAccess, pvData, fAccess);
15790 if (pVCpu->iem.s.cActiveMappings)
15791 iemMemRollback(pVCpu);
15792 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15793}
15794
15795
15796/**
15797 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15798 * VM-exit.
15799 *
15800 * @returns Strict VBox status code.
15801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15802 * @thread EMT(pVCpu)
15803 */
15804VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPU pVCpu)
15805{
15806 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15807
15808 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15809 if (pVCpu->iem.s.cActiveMappings)
15810 iemMemRollback(pVCpu);
15811 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15812}
15813
15814
15815/**
15816 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15817 *
15818 * @returns Strict VBox status code.
15819 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15820 * @thread EMT(pVCpu)
15821 */
15822VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15823{
15824 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15825 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15826 if (pVCpu->iem.s.cActiveMappings)
15827 iemMemRollback(pVCpu);
15828 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15829}
15830
15831
15832/**
15833 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15834 *
15835 * @returns Strict VBox status code.
15836 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15837 * @param uVector The external interrupt vector (pass 0 if the external
15838 * interrupt is still pending).
15839 * @param fIntPending Whether the external interrupt is pending or
15840 * acknowledged in the interrupt controller.
15841 * @thread EMT(pVCpu)
15842 */
15843VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15844{
15845 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15846 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15847 if (pVCpu->iem.s.cActiveMappings)
15848 iemMemRollback(pVCpu);
15849 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15850}
15851
15852
15853/**
15854 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15855 *
15856 * @returns Strict VBox status code.
15857 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15858 * @param uVector The SIPI vector.
15859 * @thread EMT(pVCpu)
15860 */
15861VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15862{
15863 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15864 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15865 if (pVCpu->iem.s.cActiveMappings)
15866 iemMemRollback(pVCpu);
15867 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15868}
15869
15870
15871/**
15872 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15873 *
15874 * @returns Strict VBox status code.
15875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15876 * @thread EMT(pVCpu)
15877 */
15878VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15879{
15880 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15881 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15882 if (pVCpu->iem.s.cActiveMappings)
15883 iemMemRollback(pVCpu);
15884 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15885}
15886
15887
15888/**
15889 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15890 *
15891 * @returns Strict VBox status code.
15892 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15893 * @thread EMT(pVCpu)
15894 */
15895VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15896{
15897 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15898 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15899 if (pVCpu->iem.s.cActiveMappings)
15900 iemMemRollback(pVCpu);
15901 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15902}
15903
15904
15905/**
15906 * Interface for HM and EM to emulate VM-exits due to the Monitor-Trap Flag (MTF).
15907 *
15908 * @returns Strict VBox status code.
15909 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15910 * @thread EMT(pVCpu)
15911 */
15912VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitMtf(PVMCPU pVCpu)
15913{
15914 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15915 VBOXSTRICTRC rcStrict = iemVmxVmexitMtf(pVCpu);
15916 if (pVCpu->iem.s.cActiveMappings)
15917 iemMemRollback(pVCpu);
15918 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15919}
15920
15921
15922/**
15923 * Interface for HM and EM to emulate the VMREAD instruction.
15924 *
15925 * @returns Strict VBox status code.
15926 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15927 * @param pExitInfo Pointer to the VM-exit information struct.
15928 * @thread EMT(pVCpu)
15929 */
15930VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15931{
15932 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15933 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15934 Assert(pExitInfo);
15935
15936 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15937
15938 VBOXSTRICTRC rcStrict;
15939 uint8_t const cbInstr = pExitInfo->cbInstr;
15940 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15941 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15942 {
15943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15944 {
15945 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15946 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15947 }
15948 else
15949 {
15950 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15951 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15952 }
15953 }
15954 else
15955 {
15956 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15957 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15958 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15959 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15960 }
15961 if (pVCpu->iem.s.cActiveMappings)
15962 iemMemRollback(pVCpu);
15963 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15964}
15965
15966
15967/**
15968 * Interface for HM and EM to emulate the VMWRITE instruction.
15969 *
15970 * @returns Strict VBox status code.
15971 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15972 * @param pExitInfo Pointer to the VM-exit information struct.
15973 * @thread EMT(pVCpu)
15974 */
15975VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15976{
15977 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15978 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15979 Assert(pExitInfo);
15980
15981 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15982
15983 uint64_t u64Val;
15984 uint8_t iEffSeg;
15985 IEMMODE enmEffAddrMode;
15986 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15987 {
15988 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15989 iEffSeg = UINT8_MAX;
15990 enmEffAddrMode = UINT8_MAX;
15991 }
15992 else
15993 {
15994 u64Val = pExitInfo->GCPtrEffAddr;
15995 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15996 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15997 }
15998 uint8_t const cbInstr = pExitInfo->cbInstr;
15999 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16000 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
16001 if (pVCpu->iem.s.cActiveMappings)
16002 iemMemRollback(pVCpu);
16003 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16004}
16005
16006
16007/**
16008 * Interface for HM and EM to emulate the VMPTRLD instruction.
16009 *
16010 * @returns Strict VBox status code.
16011 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16012 * @param pExitInfo Pointer to the VM-exit information struct.
16013 * @thread EMT(pVCpu)
16014 */
16015VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16016{
16017 Assert(pExitInfo);
16018 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16019 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16020
16021 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16022
16023 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16024 uint8_t const cbInstr = pExitInfo->cbInstr;
16025 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16026 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16027 if (pVCpu->iem.s.cActiveMappings)
16028 iemMemRollback(pVCpu);
16029 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16030}
16031
16032
16033/**
16034 * Interface for HM and EM to emulate the VMPTRST instruction.
16035 *
16036 * @returns Strict VBox status code.
16037 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16038 * @param pExitInfo Pointer to the VM-exit information struct.
16039 * @thread EMT(pVCpu)
16040 */
16041VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16042{
16043 Assert(pExitInfo);
16044 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16045 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16046
16047 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16048
16049 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16050 uint8_t const cbInstr = pExitInfo->cbInstr;
16051 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16052 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16053 if (pVCpu->iem.s.cActiveMappings)
16054 iemMemRollback(pVCpu);
16055 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16056}
16057
16058
16059/**
16060 * Interface for HM and EM to emulate the VMCLEAR instruction.
16061 *
16062 * @returns Strict VBox status code.
16063 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16064 * @param pExitInfo Pointer to the VM-exit information struct.
16065 * @thread EMT(pVCpu)
16066 */
16067VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16068{
16069 Assert(pExitInfo);
16070 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16071 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16072
16073 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16074
16075 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16076 uint8_t const cbInstr = pExitInfo->cbInstr;
16077 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16078 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16079 if (pVCpu->iem.s.cActiveMappings)
16080 iemMemRollback(pVCpu);
16081 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16082}
16083
16084
16085/**
16086 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16087 *
16088 * @returns Strict VBox status code.
16089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16090 * @param cbInstr The instruction length in bytes.
16091 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16092 * VMXINSTRID_VMRESUME).
16093 * @thread EMT(pVCpu)
16094 */
16095VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPU pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16096{
16097 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16098 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16099
16100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16101 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16102 if (pVCpu->iem.s.cActiveMappings)
16103 iemMemRollback(pVCpu);
16104 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16105}
16106
16107
16108/**
16109 * Interface for HM and EM to emulate the VMXON instruction.
16110 *
16111 * @returns Strict VBox status code.
16112 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16113 * @param pExitInfo Pointer to the VM-exit information struct.
16114 * @thread EMT(pVCpu)
16115 */
16116VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
16117{
16118 Assert(pExitInfo);
16119 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16120 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16121
16122 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16123
16124 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16125 uint8_t const cbInstr = pExitInfo->cbInstr;
16126 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16127 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16128 if (pVCpu->iem.s.cActiveMappings)
16129 iemMemRollback(pVCpu);
16130 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16131}
16132
16133
16134/**
16135 * Interface for HM and EM to emulate the VMXOFF instruction.
16136 *
16137 * @returns Strict VBox status code.
16138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16139 * @param cbInstr The instruction length in bytes.
16140 * @thread EMT(pVCpu)
16141 */
16142VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
16143{
16144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16145 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16146
16147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16149 Assert(!pVCpu->iem.s.cActiveMappings);
16150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16151}
16152
16153
16154/**
16155 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16156 *
16157 * @remarks The @a pvUser argument is currently unused.
16158 */
16159PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16160 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16161 PGMACCESSORIGIN enmOrigin, void *pvUser)
16162{
16163 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16164
16165 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16166 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16167 {
16168 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16169 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16170
16171 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16172 * Currently they will go through as read accesses. */
16173 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16174 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16175 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16176 if (RT_FAILURE(rcStrict))
16177 return rcStrict;
16178
16179 /* Any access to this APIC-access page has been handled; the caller should not carry out the access itself. */
16180 return VINF_SUCCESS;
16181 }
16182
16183 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16184 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16185 if (RT_FAILURE(rc))
16186 return rc;
16187
16188 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16189 return VINF_PGM_HANDLER_DO_DEFAULT;
16190}
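
/*
 * Illustrative sketch, not part of the original source: how the two success
 * statuses returned by the handler above are meant to be interpreted.  This
 * is a simplified outline of the caller side, not the actual PGM dispatch
 * code.
 */
#if 0 /* example only */
VBOXSTRICTRC rcStrict = iemVmxApicAccessPageHandler(pVM, pVCpu, GCPhysFault, NULL /*pvPhys*/, pvBuf, cbBuf,
                                                    PGMACCESSTYPE_WRITE, PGMACCESSORIGIN_IEM, NULL /*pvUser*/);
if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
{
    /* Outside VMX non-root mode: the page was deregistered and the caller
       performs the access against normal guest memory itself. */
}
else if (rcStrict == VINF_SUCCESS)
{
    /* The access was virtualized (or triggered a VM-exit); the caller must
       not touch the page itself. */
}
#endif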
16191
16192#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16193
16194#ifdef IN_RING3
16195
16196/**
16197 * Handles the unlikely and probably fatal merge cases.
16198 *
16199 * @returns Merged status code.
16200 * @param rcStrict Current EM status code.
16201 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16202 * with @a rcStrict.
16203 * @param iMemMap The memory mapping index. For error reporting only.
16204 * @param pVCpu The cross context virtual CPU structure of the calling
16205 * thread, for error reporting only.
16206 */
16207DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16208 unsigned iMemMap, PVMCPU pVCpu)
16209{
16210 if (RT_FAILURE_NP(rcStrict))
16211 return rcStrict;
16212
16213 if (RT_FAILURE_NP(rcStrictCommit))
16214 return rcStrictCommit;
16215
16216 if (rcStrict == rcStrictCommit)
16217 return rcStrictCommit;
16218
16219 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16220 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16221 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16222 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16223 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16224 return VERR_IOM_FF_STATUS_IPE;
16225}
16226
16227
16228/**
16229 * Helper for IOMR3ProcessForceFlag.
16230 *
16231 * @returns Merged status code.
16232 * @param rcStrict Current EM status code.
16233 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16234 * with @a rcStrict.
16235 * @param iMemMap The memory mapping index. For error reporting only.
16236 * @param pVCpu The cross context virtual CPU structure of the calling
16237 * thread, for error reporting only.
16238 */
16239DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16240{
16241 /* Simple. */
16242 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16243 return rcStrictCommit;
16244
16245 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16246 return rcStrict;
16247
16248 /* EM scheduling status codes. */
16249 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16250 && rcStrict <= VINF_EM_LAST))
16251 {
16252 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16253 && rcStrictCommit <= VINF_EM_LAST))
16254 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16255 }
16256
16257 /* Unlikely */
16258 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16259}
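
/*
 * Illustrative sketch, not part of the original source: the decision order
 * implemented by iemR3MergeStatus() above, with two outcomes that follow
 * directly from it.  The iMemMap value is only used for error reporting.
 *
 *   1. rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3      -> rcStrictCommit.
 *   2. rcStrictCommit is VINF_SUCCESS                     -> rcStrict.
 *   3. Both are EM scheduling codes (VINF_EM_FIRST..LAST) -> the smaller
 *      (i.e. higher priority) of the two.
 *   4. Anything else                                      -> iemR3MergeStatusSlow().
 */
#if 0 /* example only */
VBOXSTRICTRC rc1 = iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu);
/* rc1 == VINF_EM_RAW_TO_R3: a clean execution status adopts the commit status. */
VBOXSTRICTRC rc2 = iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu);
/* rc2 == VINF_EM_HALT: a successful commit keeps the existing EM status. */
#endif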
16260
16261
16262/**
16263 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16264 *
16265 * @returns Merged status code of @a rcStrict and what the commit operation returned.
16266 * @param pVM The cross context VM structure.
16267 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16268 * @param rcStrict The status code returned by ring-0 or raw-mode.
16269 */
16270VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16271{
16272 /*
16273 * Reset the pending commit.
16274 */
16275 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16276 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16277 ("%#x %#x %#x\n",
16278 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16279 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16280
16281 /*
16282 * Commit the pending bounce buffers (usually just one).
16283 */
16284 unsigned cBufs = 0;
16285 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16286 while (iMemMap-- > 0)
16287 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16288 {
16289 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16290 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16291 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16292
16293 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16294 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16295 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16296
16297 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16298 {
16299 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16300 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16301 pbBuf,
16302 cbFirst,
16303 PGMACCESSORIGIN_IEM);
16304 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16305 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16306 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16307 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16308 }
16309
16310 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16311 {
16312 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16314 pbBuf + cbFirst,
16315 cbSecond,
16316 PGMACCESSORIGIN_IEM);
16317 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16318 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16319 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16320 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16321 }
16322 cBufs++;
16323 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16324 }
16325
16326 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16327 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16328 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16329 pVCpu->iem.s.cActiveMappings = 0;
16330 return rcStrict;
16331}
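
/*
 * Illustrative sketch, not part of the original source: the shape of the call
 * site described in the function comment above.  The surrounding loop and the
 * VMCPU_FF_IS_SET check are assumptions for illustration only.
 */
#if 0 /* example only */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif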
16332
16333#endif /* IN_RING3 */
16334